| Instruction (string, 13–145k chars) | input_code (string, 35–390k chars) | output_code (string, 35–390k chars) |
|---|---|---|
Split a group's edit actions across several pages
The actions on the edit pages should be divided into several sub-pages
- [ ] Convert the pages to use the update base template
- [ ] Move the members to a new page
|
server/apps/group/urls.py
<|code_start|>from django.conf.urls import url
from django.urls import path
from .views import *
app_name = 'group'
urlpatterns = [
path('<slug:pk>/', DetailClubView.as_view(), name='detail'),
path('<slug:pk>/edit', UpdateClubView.as_view(), name='update'),
path('<slug:group_slug>/member/add/<slug:user_id>', add_member, name='add-member'),
path('<slug:pk>/members/edit', edit_named_memberships, name='editNamedMemberships'),
path('', ListClubView.as_view(), name='list'),
path('<slug:group_slug>/events/edit', UpdateClubEventsView.as_view(), name='update-events')
]<|code_end|>
server/apps/group/views.py
<|code_start|>from django.shortcuts import redirect, render
from django.views.generic import DetailView, UpdateView, ListView, View
from .models import Club, Group, NamedMembership
from .forms import NamedMembershipClubFormset
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.views.decorators.http import require_http_methods
from django.contrib.auth.mixins import UserPassesTestMixin
from apps.student.models import Student
from apps.event.models import Event
from apps.event.forms import EventGroupFormSet
class ListClubView(ListView):
model = Club
template_name = 'group/club_list.html'
class UpdateClubView(UpdateView):
model = Club
template_name = 'group/club_update.html'
fields = ['description', 'admins', 'logo']
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
memberships = NamedMembership.objects.filter(group=self.object)
membersForm = NamedMembershipClubFormset(queryset=memberships)
context['members'] = membersForm
return context
class UpdateClubEventsView(UserPassesTestMixin, View):
template_name = 'group/club_events_update.html'
def test_func(self):
group = Group.get_group_by_slug(self.kwargs['group_slug'])
return group.is_admin(self.request.user)
def get_context_data(self, **kwargs):
context = {}
context['object'] = Group.get_group_by_slug(kwargs['group_slug'])
context['events'] = Event.objects.filter(group=kwargs['group_slug'])
context['form'] = EventGroupFormSet(queryset=context['events'])
return context
def get(self, request, group_slug):
return render(request, self.template_name, context=self.get_context_data(group_slug=group_slug))
def post(self, request, group_slug):
return edit_events(request, group_slug)
def delete(self, request, group_slug, event_id):
print('Hello')
event = Event.objects.delete(group=group_slug, id=event_id)
return redirect('group:update-events')
class DetailClubView(DetailView):
model = Club
template_name = 'group/club_detail.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
members = NamedMembership.objects.filter(group=self.object)
context['members'] = members
return context
@login_required
def add_member(request, group_slug, student_id):
"""Add a user to a club"""
group = Group.get_group_by_slug(group_slug)
student = Student.objects.get(id=student_id)
if isinstance(Club, group):
NamedMembership.objects.create(student=student, group=group)
@require_http_methods(['POST'])
@login_required
def edit_named_memberships(request, pk):
club = Club.objects.get(pk=pk)
form = NamedMembershipClubFormset(request.POST)
if form.is_valid():
members = form.save(commit=False)
for member in members:
member.group = club
member.save()
for member in form.deleted_objects:
member.delete()
messages.success(request, 'Membres modifies')
return redirect('group:update', pk)
else:
messages.warning(request, form.errors)
return redirect('group:update', pk)
@login_required
def edit_events(request, group_slug):
group = Group.get_group_by_slug(group_slug)
form = EventGroupFormSet(request.POST)
if form.is_valid():
events = form.save(commit=False)
# Link each event to the group
for event in events:
event.group = group.slug
event.save()
# Delete missing events
for event in form.deleted_objects:
event.delete()
messages.success(request, 'Events modifies')
return redirect('group:update-events', group_slug)
else:
messages.warning(request, form.errors)
return redirect('group:update-events', group_slug)
<|code_end|>
|
server/apps/group/urls.py
<|code_start|>from django.conf.urls import url
from django.urls import path
from .views import *
app_name = 'group'
urlpatterns = [
path('<slug:pk>/', DetailClubView.as_view(), name='detail'),
path('<slug:pk>/edit', UpdateClubView.as_view(), name='update'),
path('<slug:group_slug>/member/add/<slug:user_id>', add_member, name='add-member'),
path('', ListClubView.as_view(), name='list'),
path('<slug:group_slug>/events/edit', UpdateGroupEventsView.as_view(), name='update-events'),
path('<slug:group_slug>/members/edit', UpdateGroupMembersView.as_view(), name='update-members')
]<|code_end|>
server/apps/group/views.py
<|code_start|>from django.shortcuts import redirect, render
from django.views.generic import DetailView, UpdateView, ListView, View
from .models import Club, Group, NamedMembership
from .forms import NamedMembershipClubFormset
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.views.decorators.http import require_http_methods
from django.contrib.auth.mixins import UserPassesTestMixin
from apps.student.models import Student
from apps.event.models import Event
from apps.event.forms import EventGroupFormSet
class ListClubView(ListView):
model = Club
template_name = 'group/club_list.html'
class UpdateClubView(UpdateView):
model = Club
template_name = 'group/club_update.html'
fields = ['description', 'admins', 'logo']
class UpdateGroupEventsView(UserPassesTestMixin, View):
template_name = 'group/club_events_update.html'
def test_func(self):
group = Group.get_group_by_slug(self.kwargs['group_slug'])
return group.is_admin(self.request.user)
def get_context_data(self, **kwargs):
context = {}
context['object'] = Group.get_group_by_slug(kwargs['group_slug'])
context['events'] = Event.objects.filter(group=kwargs['group_slug'])
context['form'] = EventGroupFormSet(queryset=context['events'])
return context
def get(self, request, group_slug):
return render(request, self.template_name, context=self.get_context_data(group_slug=group_slug))
def post(self, request, group_slug):
return edit_events(request, group_slug)
class UpdateGroupMembersView(UserPassesTestMixin, View):
template_name = 'group/club_members_update.html'
def test_func(self):
group = Group.get_group_by_slug(self.kwargs['group_slug'])
return group.is_admin(self.request.user)
def get_context_data(self, **kwargs):
context = {}
context['object'] = Group.get_group_by_slug(kwargs['group_slug'])
if isinstance(context['object'], Club):
memberships = NamedMembership.objects.filter(group=context['object'])
membersForm = NamedMembershipClubFormset(queryset=memberships)
context['members'] = membersForm
return context
def get(self, request, group_slug):
return render(request, self.template_name, context=self.get_context_data(group_slug=group_slug))
def post(self, request, group_slug):
return edit_named_memberships(request, group_slug)
class DetailClubView(DetailView):
model = Club
template_name = 'group/club_detail.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
members = NamedMembership.objects.filter(group=self.object)
context['members'] = members
return context
@login_required
def add_member(request, group_slug, student_id):
"""Add a user to a club"""
group = Group.get_group_by_slug(group_slug)
student = Student.objects.get(id=student_id)
if isinstance(Club, group):
NamedMembership.objects.create(student=student, group=group)
@require_http_methods(['POST'])
@login_required
def edit_named_memberships(request, group_slug):
club = Club.objects.filter(slug=group_slug).first()
form = NamedMembershipClubFormset(request.POST)
if form.is_valid():
members = form.save(commit=False)
for member in members:
member.group = club
member.save()
for member in form.deleted_objects:
member.delete()
messages.success(request, 'Membres modifies')
return redirect('group:update', club.id)
else:
messages.warning(request, form.errors)
return redirect('group:update', club.id)
@login_required
def edit_events(request, group_slug):
group = Group.get_group_by_slug(group_slug)
form = EventGroupFormSet(request.POST)
if form.is_valid():
events = form.save(commit=False)
# Link each event to the group
for event in events:
event.group = group.slug
event.save()
# Delete missing events
for event in form.deleted_objects:
event.delete()
messages.success(request, 'Events modifies')
return redirect('group:update-events', group_slug)
else:
messages.warning(request, form.errors)
return redirect('group:update-events', group_slug)
<|code_end|>
|
Add parents to groups
Add a parent field to groups so that links can be made between groups (a minimal sketch follows this row)
|
server/apps/group/migrations/0004_club_parent.py
<|code_start|><|code_end|>
server/apps/group/models.py
<|code_start|>from django.db import models
from django.contrib.auth.models import User
from django.utils.text import slugify
from django.urls.base import reverse
from apps.student.models import Student
from apps.utils.upload import PathAndRename
from model_utils.managers import InheritanceManager
TYPE_BDX = [
('BDA', 'Bureau des Arts'),
('BDE', 'Bureau des Élèves'),
('BDS', 'Bureau des Sports'),
('Asso', 'Association')
]
path_and_rename = PathAndRename("groups/logo")
class Group(models.Model):
name = models.CharField(verbose_name='Nom du groupe', unique=True, max_length=200)
description = models.TextField(verbose_name='Description du groupe', blank=True)
admins = models.ManyToManyField(Student, verbose_name='Administrateur.rice.s du groupe', related_name='admins')
members = models.ManyToManyField(Student, verbose_name='Membres du groupe', related_name='members')
logo = models.ImageField(verbose_name='Logo du groupe', blank=True, null=True, upload_to=path_and_rename)
slug = models.SlugField(max_length=40, unique=True, blank=True)
class Meta:
abstract = True
def __str__(self):
return self.name
def is_admin(self, user: User) -> bool:
"""Indicates if a user is admin."""
student = Student.objects.filter(user=user).first()
return student in self.admins.all()
@staticmethod
def get_group_by_slug(slug: str):
"""Get a group from a slug."""
type_slug = slug.split('--')[0]
if type_slug == 'club':
return Club.objects.get(slug=slug)
else:
return Group.objects.get(slug=slug)
@property
def get_absolute_url(self):
return reverse('group:detail', kwargs={'pk': self.pk})
class Club(Group):
members = models.ManyToManyField(Student, through='NamedMembership')
bdx_type = models.CharField(verbose_name='Type de club BDX', choices=TYPE_BDX, max_length=60)
def save(self, *args, **kwargs):
self.slug = f'club--{slugify(self.name)}'
super(Club, self).save(*args, **kwargs)
class NamedMembership(models.Model):
function = models.CharField(verbose_name='Poste occupé', max_length=200, blank=True)
year = models.IntegerField(verbose_name='Année du poste', blank=True, null=True)
student = models.ForeignKey(Student, on_delete=models.CASCADE)
group = models.ForeignKey(Club, on_delete=models.CASCADE)<|code_end|>
server/apps/group/views.py
<|code_start|>from django.shortcuts import redirect, render
from django.views.generic import DetailView, UpdateView, ListView, View
from .models import Club, Group, NamedMembership
from .forms import NamedMembershipClubFormset
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.views.decorators.http import require_http_methods
from django.contrib.auth.mixins import UserPassesTestMixin
from apps.student.models import Student
from apps.event.models import Event
from apps.event.forms import EventFormSet
class ListClubView(ListView):
model = Club
template_name = 'group/club_list.html'
class UpdateClubView(UpdateView):
model = Club
template_name = 'group/club_update.html'
fields = ['description', 'admins', 'logo']
class UpdateGroupEventsView(UserPassesTestMixin, View):
template_name = 'group/club_events_update.html'
def test_func(self):
group = Group.get_group_by_slug(self.kwargs['group_slug'])
return group.is_admin(self.request.user)
def get_context_data(self, **kwargs):
context = {}
context['object'] = Group.get_group_by_slug(kwargs['group_slug'])
context['events'] = Event.objects.filter(group=kwargs['group_slug'])
context['form'] = EventFormSet(queryset=context['events'])
return context
def get(self, request, group_slug):
return render(request, self.template_name, context=self.get_context_data(group_slug=group_slug))
def post(self, request, group_slug):
return edit_events(request, group_slug)
class UpdateGroupMembersView(UserPassesTestMixin, View):
template_name = 'group/club_members_update.html'
def test_func(self):
group = Group.get_group_by_slug(self.kwargs['group_slug'])
return group.is_admin(self.request.user)
def get_context_data(self, **kwargs):
context = {}
context['object'] = Group.get_group_by_slug(kwargs['group_slug'])
if isinstance(context['object'], Club):
memberships = NamedMembership.objects.filter(group=context['object'])
membersForm = NamedMembershipClubFormset(queryset=memberships)
context['members'] = membersForm
return context
def get(self, request, group_slug):
return render(request, self.template_name, context=self.get_context_data(group_slug=group_slug))
def post(self, request, group_slug):
return edit_named_memberships(request, group_slug)
class DetailClubView(DetailView):
model = Club
template_name = 'group/club_detail.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
members = NamedMembership.objects.filter(group=self.object)
context['members'] = members
context['is_admin'] = self.object.is_admin(self.request.user)
context['events'] = Event.objects.filter(group=self.object.slug)
return context
@login_required
def add_member(request, group_slug, student_id):
"""Add a user to a club"""
group = Group.get_group_by_slug(group_slug)
student = Student.objects.get(id=student_id)
if isinstance(Club, group):
NamedMembership.objects.create(student=student, group=group)
@require_http_methods(['POST'])
@login_required
def edit_named_memberships(request, group_slug):
club = Club.objects.filter(slug=group_slug).first()
form = NamedMembershipClubFormset(request.POST)
if form.is_valid():
members = form.save(commit=False)
for member in members:
member.group = club
member.save()
for member in form.deleted_objects:
member.delete()
messages.success(request, 'Membres modifies')
return redirect('group:update', club.id)
else:
messages.warning(request, form.errors)
return redirect('group:update', club.id)
@login_required
def edit_events(request, group_slug):
group = Group.get_group_by_slug(group_slug)
form = EventFormSet(request.POST)
if form.is_valid():
events = form.save(commit=False)
# Link each event to the group
for event in events:
event.group = group.slug
event.save()
# Delete missing events
for event in form.deleted_objects:
event.delete()
messages.success(request, 'Events modifies')
return redirect('group:update-events', group_slug)
else:
messages.warning(request, form.errors)
return redirect('group:update-events', group_slug)
<|code_end|>
|
server/apps/group/migrations/0004_club_parent.py
<|code_start|># Generated by Django 3.0.5 on 2020-06-02 07:16
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('group', '0003_auto_20200601_2109'),
]
operations = [
migrations.AddField(
model_name='club',
name='parent',
field=models.SlugField(blank=True, max_length=40, null=True),
),
]
<|code_end|>
server/apps/group/models.py
<|code_start|>from django.db import models
from django.contrib.auth.models import User
from django.utils.text import slugify
from django.urls.base import reverse
from apps.student.models import Student
from apps.utils.upload import PathAndRename
from model_utils.managers import InheritanceManager
TYPE_BDX = [
('BDA', 'Bureau des Arts'),
('BDE', 'Bureau des Élèves'),
('BDS', 'Bureau des Sports'),
('Asso', 'Association')
]
path_and_rename = PathAndRename("groups/logo")
class Group(models.Model):
name = models.CharField(verbose_name='Nom du groupe', unique=True, max_length=200)
description = models.TextField(verbose_name='Description du groupe', blank=True)
admins = models.ManyToManyField(Student, verbose_name='Administrateur.rice.s du groupe', related_name='admins')
members = models.ManyToManyField(Student, verbose_name='Membres du groupe', related_name='members')
logo = models.ImageField(verbose_name='Logo du groupe', blank=True, null=True, upload_to=path_and_rename)
slug = models.SlugField(max_length=40, unique=True, blank=True)
parent = models.SlugField(max_length=40, blank=True, null=True)
class Meta:
abstract = True
def __str__(self):
return self.name
def is_admin(self, user: User) -> bool:
"""Indicates if a user is admin."""
student = Student.objects.filter(user=user).first()
return student in self.admins.all() or self.get_parent().is_admin(user)
@property
def get_parent(self):
"""Get the parent group of this group."""
return Group.get_group_by_slug(self.parent)
@staticmethod
def get_group_by_slug(slug: str):
"""Get a group from a slug."""
type_slug = slug.split('--')[0]
if type_slug == 'club':
return Club.objects.get(slug=slug)
else:
return Group.objects.get(slug=slug)
@property
def get_absolute_url(self):
return reverse('group:detail', kwargs={'pk': self.pk})
class Club(Group):
members = models.ManyToManyField(Student, through='NamedMembership')
bdx_type = models.CharField(verbose_name='Type de club BDX', choices=TYPE_BDX, max_length=60)
def save(self, *args, **kwargs):
self.slug = f'club--{slugify(self.name)}'
super(Club, self).save(*args, **kwargs)
class NamedMembership(models.Model):
function = models.CharField(verbose_name='Poste occupé', max_length=200, blank=True)
year = models.IntegerField(verbose_name='Année du poste', blank=True, null=True)
student = models.ForeignKey(Student, on_delete=models.CASCADE)
group = models.ForeignKey(Club, on_delete=models.CASCADE)<|code_end|>
server/apps/group/views.py
<|code_start|>from django.shortcuts import redirect, render
from django.views.generic import DetailView, UpdateView, ListView, View
from .models import Club, Group, NamedMembership
from .forms import NamedMembershipClubFormset
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.views.decorators.http import require_http_methods
from django.contrib.auth.mixins import UserPassesTestMixin
from apps.student.models import Student
from apps.event.models import Event
from apps.event.forms import EventFormSet
class ListClubView(ListView):
model = Club
template_name = 'group/club_list.html'
class UpdateClubView(UpdateView):
model = Club
template_name = 'group/club_update.html'
fields = ['description', 'admins', 'logo']
class UpdateGroupEventsView(UserPassesTestMixin, View):
template_name = 'group/club_events_update.html'
def test_func(self):
if self.request.user.is_authenticated:
group = Group.get_group_by_slug(self.kwargs['group_slug'])
return group.is_admin(self.request.user)
return False
def get_context_data(self, **kwargs):
context = {}
context['object'] = Group.get_group_by_slug(kwargs['group_slug'])
context['events'] = Event.objects.filter(group=kwargs['group_slug'])
context['form'] = EventFormSet(queryset=context['events'])
return context
def get(self, request, group_slug):
return render(request, self.template_name, context=self.get_context_data(group_slug=group_slug))
def post(self, request, group_slug):
return edit_events(request, group_slug)
class UpdateGroupMembersView(UserPassesTestMixin, View):
template_name = 'group/club_members_update.html'
def test_func(self):
if self.request.user.is_authenticated:
group = Group.get_group_by_slug(self.kwargs['group_slug'])
return group.is_admin(self.request.user)
return False
def get_context_data(self, **kwargs):
context = {}
context['object'] = Group.get_group_by_slug(kwargs['group_slug'])
if isinstance(context['object'], Club):
memberships = NamedMembership.objects.filter(group=context['object'])
membersForm = NamedMembershipClubFormset(queryset=memberships)
context['members'] = membersForm
return context
def get(self, request, group_slug):
return render(request, self.template_name, context=self.get_context_data(group_slug=group_slug))
def post(self, request, group_slug):
return edit_named_memberships(request, group_slug)
class DetailClubView(DetailView):
model = Club
template_name = 'group/club_detail.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
members = NamedMembership.objects.filter(group=self.object)
context['members'] = members
context['is_admin'] = self.object.is_admin(self.request.user) if self.request.user.is_authenticated else False
context['events'] = Event.objects.filter(group=self.object.slug)
return context
@login_required
def add_member(request, group_slug, student_id):
"""Add a user to a club"""
group = Group.get_group_by_slug(group_slug)
student = Student.objects.get(id=student_id)
if isinstance(Club, group):
NamedMembership.objects.create(student=student, group=group)
@require_http_methods(['POST'])
@login_required
def edit_named_memberships(request, group_slug):
club = Club.objects.filter(slug=group_slug).first()
form = NamedMembershipClubFormset(request.POST)
if form.is_valid():
members = form.save(commit=False)
for member in members:
member.group = club
member.save()
for member in form.deleted_objects:
member.delete()
messages.success(request, 'Membres modifies')
return redirect('group:update', club.id)
else:
messages.warning(request, form.errors)
return redirect('group:update', club.id)
@login_required
def edit_events(request, group_slug):
group = Group.get_group_by_slug(group_slug)
form = EventFormSet(request.POST)
if form.is_valid():
events = form.save(commit=False)
# Link each event to the group
for event in events:
event.group = group.slug
event.save()
# Delete missing events
for event in form.deleted_objects:
event.delete()
messages.success(request, 'Events modifies')
return redirect('group:update-events', group_slug)
else:
messages.warning(request, form.errors)
return redirect('group:update-events', group_slug)
<|code_end|>
|
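For reference, a minimal sketch of the parent-link idea from this row. It is illustrative only: the project's actual `Group` model is abstract and resolves the slug through `Group.get_group_by_slug`, whereas this standalone version uses a plain queryset lookup.

```python
from django.db import models


class Group(models.Model):
    """Simplified sketch of a group that references its parent by slug."""
    name = models.CharField(max_length=200, unique=True)
    slug = models.SlugField(max_length=40, unique=True, blank=True)
    # Parent group referenced by slug; empty means a top-level group.
    parent = models.SlugField(max_length=40, blank=True, null=True)

    def get_parent(self):
        """Resolve the parent slug, or return None for top-level groups."""
        if not self.parent:
            return None
        return Group.objects.filter(slug=self.parent).first()
```

A guard like the `if not self.parent` check above matters because recursive helpers such as `is_admin` walk up the parent chain and need a stopping point.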
Frontend: planned and archived events do not work
Archived events return a 500 error.
Planned events return nothing. (A sketch of the corrected query-parameter handling follows this row.)
|
server/apps/event/api_views.py
<|code_start|>from datetime import datetime
from rest_framework import generics
from .models import BaseEvent
from .serializers import BaseEventSerializer
class ListEventsGroupAPIView(generics.ListAPIView):
"""List events for a group depending on the chosen
time window. By default only returns current events."""
serializer_class = BaseEventSerializer
def get_queryset(self):
if self.request.method == 'GET':
if self.request.GET.get('view') == 'archives':
return BaseEvent.objects.filter(group=self.kwargs['group'], date__lt=datetime.today())
elif self.request.get('view') == 'all':
return BaseEvent.objects.filter(group=self.kwargs['group'])
return BaseEvent.objects.filter(group=self.kwargs['group'], date__gte=datetime.today())
class UpdateEventAPIView(generics.RetrieveDestroyAPIView):
serializer_class = BaseEventSerializer
lookup_field = 'slug'
lookup_url_kwarg = 'event_slug'
def get_queryset(self):
return BaseEvent.objects.filter(slug=self.kwargs['event_slug'])
<|code_end|>
server/apps/event/views.py
<|code_start|>from datetime import date
from django.shortcuts import redirect, render
from django.contrib import messages
from django.contrib.auth.models import User
from django.views.generic.base import TemplateView, View
from django.views.generic import UpdateView, FormView
from django.contrib.auth.decorators import login_required
from .models import *
from .forms import EventForm, EventFormSet
from apps.group.models import Group
from apps.utils.accessMixins import LoginRequiredAccessMixin, UserIsAdmin
class EventDetailView(LoginRequiredAccessMixin, TemplateView):
template_name = 'event/detail.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
self.object = BaseEvent.get_event_by_slug(self.kwargs['event_slug'])
context['object'] = self.object
context['group'] = self.object.get_group
context['is_participating'] = self.object.is_participating(
self.request.user)
return context
class UpdateGroupCreateEventView(UserIsAdmin, FormView):
"""In the context of a group, create event view."""
template_name = 'group/event/create.html'
form_class = EventForm
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['object'] = Group.get_group_by_slug(self.kwargs['group_slug'])
return context
def form_valid(self, form, **kwargs):
event = form.save(commit=False)
event.group = Group.get_group_by_slug(
slug=self.kwargs['group_slug']).slug
event.save()
return redirect('group:create-event', self.kwargs['group_slug'])
class EventUpdateView(UserIsAdmin, UpdateView):
template_name = 'event/update.html'
fields = ['title', 'description', 'location',
'date', 'publicity', 'color', 'image']
def test_func(self) -> bool:
self.kwargs['group_slug'] = self.object.get_group.slug
return super().test_func()
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['object'] = self.object.get_group
context['event'] = self.object
return context
def get_object(self, **kwargs):
return BaseEvent.get_event_by_slug(self.kwargs['event_slug'])
def dispatch(self, request, *args, **kwargs):
self.object = BaseEvent.get_event_by_slug(self.kwargs['event_slug'])
self.kwargs['group_slug'] = self.object.get_group.slug
if isinstance(self.object, EatingEvent):
self.fields = ['title', 'description', 'location',
'date', 'publicity', 'color', 'image', 'menu']
return super().dispatch(request, *args, **kwargs)
class UpdateGroupEventsView(UserIsAdmin, View):
template_name = 'group/event/planned_edit.html'
def get_context_data(self, **kwargs):
context = {}
context['object'] = Group.get_group_by_slug(kwargs['group_slug'])
context['events'] = BaseEvent.objects.filter(
group=kwargs['group_slug'], date__gte=date.today())
context['form'] = EventFormSet(queryset=context['events'])
return context
def get(self, request, group_slug):
return render(request, self.template_name, context=self.get_context_data(group_slug=group_slug))
def post(self, request, group_slug):
return edit_events(request, group_slug)
class UpdateGroupArchivedEventsView(UserIsAdmin, View):
template_name = 'group/archived_edit.html'
def get_context_data(self, **kwargs):
context = {}
context['object'] = Group.get_group_by_slug(kwargs['group_slug'])
context['events'] = BaseEvent.objects.filter(
group=kwargs['group_slug'], date__lt=date.today())
context['form'] = EventFormSet(queryset=context['events'])
return context
def get(self, request, group_slug):
return render(request, self.template_name, context=self.get_context_data(group_slug=group_slug))
def post(self, request, group_slug):
return edit_events(request, group_slug)
@login_required
def add_participant(request, event_slug):
"""Adds the user to the list of participants."""
event = BaseEvent.get_event_by_slug(event_slug)
event.participants.add(request.user.student)
if request.GET.get('redirect'):
return redirect('home:home')
return redirect(event.get_absolute_url())
@login_required
def remove_participant(request, event_slug):
"""Removes the user from the list of participants."""
event = BaseEvent.get_event_by_slug(event_slug)
event.participants.remove(request.user.student)
if request.GET.get('redirect'):
return redirect('home:home')
return redirect(event.get_absolute_url())
@login_required
def edit_events(request, group_slug):
group = Group.get_group_by_slug(group_slug)
form = EventFormSet(request.POST)
if form.is_valid():
events = form.save(commit=False)
# Link each event to the group
for event in events:
event.group = group.slug
event.save()
# Delete missing events
for event in form.deleted_objects:
event.delete()
messages.success(request, 'Events modifies')
return redirect('group:update-events', group_slug)
else:
messages.warning(request, form.errors)
return redirect('group:update-events', group_slug)
<|code_end|>
|
server/apps/event/api_views.py
<|code_start|>from datetime import datetime
from rest_framework import generics
from .models import BaseEvent
from .serializers import BaseEventSerializer
class ListEventsGroupAPIView(generics.ListAPIView):
"""List events for a group depending on the chosen
time window. By default only returns current events."""
serializer_class = BaseEventSerializer
def get_queryset(self):
if self.request.method == 'GET':
if self.request.GET.get('view') == 'archives':
return BaseEvent.objects.filter(group=self.kwargs['group'], date__lt=datetime.today())
elif self.request.GET.get('view') == 'all':
return BaseEvent.objects.filter(group=self.kwargs['group'])
return BaseEvent.objects.filter(group=self.kwargs['group'], date__gte=datetime.today())
class UpdateEventAPIView(generics.RetrieveDestroyAPIView):
serializer_class = BaseEventSerializer
lookup_field = 'slug'
lookup_url_kwarg = 'event_slug'
def get_queryset(self):
return BaseEvent.objects.filter(slug=self.kwargs['event_slug'])
<|code_end|>
server/apps/event/views.py
<|code_start|>from datetime import date
from django.shortcuts import redirect, render
from django.contrib import messages
from django.contrib.auth.models import User
from django.views.generic.base import TemplateView, View
from django.views.generic import UpdateView, FormView
from django.contrib.auth.decorators import login_required
from .models import *
from .forms import EventForm, EventFormSet
from apps.group.models import Group
from apps.utils.accessMixins import LoginRequiredAccessMixin, UserIsAdmin
class EventDetailView(LoginRequiredAccessMixin, TemplateView):
template_name = 'event/detail.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
self.object = BaseEvent.get_event_by_slug(self.kwargs['event_slug'])
context['object'] = self.object
context['group'] = self.object.get_group
context['is_participating'] = self.object.is_participating(
self.request.user)
return context
class UpdateGroupCreateEventView(UserIsAdmin, FormView):
"""In the context of a group, create event view."""
template_name = 'group/event/create.html'
form_class = EventForm
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['object'] = Group.get_group_by_slug(self.kwargs['group_slug'])
return context
def form_valid(self, form, **kwargs):
event = form.save(commit=False)
event.group = Group.get_group_by_slug(
slug=self.kwargs['group_slug']).slug
event.save()
return redirect('group:create-event', self.kwargs['group_slug'])
class EventUpdateView(UserIsAdmin, UpdateView):
template_name = 'event/update.html'
fields = ['title', 'description', 'location',
'date', 'publicity', 'color', 'image']
def test_func(self) -> bool:
self.kwargs['group_slug'] = self.object.get_group.slug
return super().test_func()
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['object'] = self.object.get_group
context['event'] = self.object
return context
def get_object(self, **kwargs):
return BaseEvent.get_event_by_slug(self.kwargs['event_slug'])
def dispatch(self, request, *args, **kwargs):
self.object = BaseEvent.get_event_by_slug(self.kwargs['event_slug'])
self.kwargs['group_slug'] = self.object.get_group.slug
if isinstance(self.object, EatingEvent):
self.fields = ['title', 'description', 'location',
'date', 'publicity', 'color', 'image', 'menu']
return super().dispatch(request, *args, **kwargs)
class UpdateGroupEventsView(UserIsAdmin, View):
template_name = 'group/event/planned_edit.html'
def get_context_data(self, **kwargs):
context = {}
context['object'] = Group.get_group_by_slug(kwargs['group_slug'])
context['events'] = BaseEvent.objects.filter(
group=kwargs['group_slug'], date__gte=date.today())
context['form'] = EventFormSet(queryset=context['events'])
return context
def get(self, request, group_slug):
return render(request, self.template_name, context=self.get_context_data(group_slug=group_slug))
def post(self, request, group_slug):
return edit_events(request, group_slug)
class UpdateGroupArchivedEventsView(UserIsAdmin, View):
template_name = 'group/event/archived_edit.html'
def get_context_data(self, **kwargs):
context = {}
context['object'] = Group.get_group_by_slug(kwargs['group_slug'])
context['events'] = BaseEvent.objects.filter(
group=kwargs['group_slug'], date__lte=date.today())
context['form'] = EventFormSet(queryset=context['events'])
return context
def get(self, request, group_slug):
return render(request, self.template_name, context=self.get_context_data(group_slug=group_slug))
def post(self, request, group_slug):
return edit_events(request, group_slug)
@login_required
def add_participant(request, event_slug):
"""Adds the user to the list of participants."""
event = BaseEvent.get_event_by_slug(event_slug)
event.participants.add(request.user.student)
if request.GET.get('redirect'):
return redirect('home:home')
return redirect(event.get_absolute_url())
@login_required
def remove_participant(request, event_slug):
"""Removes the user from the list of participants."""
event = BaseEvent.get_event_by_slug(event_slug)
event.participants.remove(request.user.student)
if request.GET.get('redirect'):
return redirect('home:home')
return redirect(event.get_absolute_url())
@login_required
def edit_events(request, group_slug):
group = Group.get_group_by_slug(group_slug)
form = EventFormSet(request.POST)
if form.is_valid():
events = form.save(commit=False)
# Link each event to the group
for event in events:
event.group = group.slug
event.save()
# Delete missing events
for event in form.deleted_objects:
event.delete()
messages.success(request, 'Events modifies')
return redirect('group:update-events', group_slug)
else:
messages.warning(request, form.errors)
return redirect('group:update-events', group_slug)
<|code_end|>
|
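A minimal, self-contained version of the query-parameter handling this row fixes: the time window is chosen from the `view` query parameter, read via `self.request.GET.get('view')` (the broken input used `self.request.get(...)`, which fails at runtime). Model and serializer names are the ones used in the row above.

```python
from datetime import datetime

from rest_framework import generics

from .models import BaseEvent
from .serializers import BaseEventSerializer


class ListEventsGroupAPIView(generics.ListAPIView):
    """List a group's events: ?view=archives -> past events,
    ?view=all -> everything, otherwise only upcoming events."""
    serializer_class = BaseEventSerializer

    def get_queryset(self):
        group = self.kwargs['group']
        view = self.request.GET.get('view')  # query parameters live on request.GET
        if view == 'archives':
            return BaseEvent.objects.filter(group=group, date__lt=datetime.today())
        if view == 'all':
            return BaseEvent.objects.filter(group=group)
        return BaseEvent.objects.filter(group=group, date__gte=datetime.today())
```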
Lost password
This feature does not work on my computer but works on Gabin Schieffer's computer <br/> Suggested by [email protected]
|
server/apps/account/urls.py
<|code_start|>from django.conf.urls import url
from django.urls import path
from .views import *
app_name = 'account'
urlpatterns = [
path('login', AuthView.as_view(), name='login'),
path('logout', LogoutView.as_view(), name='logout'),
path('registration', RegistrationView.as_view(), name='registration'),
path('registration/temporary/<int:id>/approve', ApproveTemporaryRegistrationView.as_view(),
name='temp-req-approve'),
path('registration/temporary/<int:id>/deny', DenyTemporaryRegistrationView.as_view(),
name='temp-req-deny'),
path('registration/temporary', TemporaryRegistrationView.as_view(),
name='temporary-registration'),
path('activate/<slug:uidb64>/<slug:token>/',
ConfirmUser.as_view(), name='confirm'),
path('activate/<slug:uidb64>/<slug:token>/temporary',
ConfirmUserTemporary.as_view(), name='confirm-temporary'),
path('permanent', PermanentAccountUpgradeView.as_view(),
name='upgrade-permanent'),
path('forgotten', ForgottenPassView.as_view(), name='forgotten_pass'),
path('reset_pass/<slug:uidb64>/<slug:token>',
PasswordResetConfirmCustomView.as_view(), name='reset_pass'),
path('<slug:user_id>/student', redirect_to_student, name='redirect-student'),
]
<|code_end|>
server/apps/account/views.py
<|code_start|>from datetime import date
from typing import Any, Dict, Union
from django.conf import settings
from django.contrib.auth import login, logout
from django.contrib.sites.shortcuts import get_current_site
from django.http.response import HttpResponse
from django.views.generic.edit import FormView
from django.shortcuts import get_object_or_404
from apps.utils.accessMixins import UserIsSuperAdmin
from .forms import SignUpForm, LoginForm, ForgottenPassForm, TemporaryRequestSignUpForm, UpgradePermanentAccountForm
from .tokens import account_activation_token
from django.contrib import messages
from django.shortcuts import render, redirect
from django.template.loader import render_to_string
from django.utils.http import urlsafe_base64_decode, urlsafe_base64_encode
from django.utils.encoding import force_bytes, force_text
from django.urls import reverse, reverse_lazy
from django.contrib.auth.views import PasswordResetConfirmView
from django.views import View
from django.contrib.auth.forms import SetPasswordForm
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.models import User
from apps.student.models import Student
from .emailAuthBackend import EmailBackend
from .models import TemporaryAccessRequest
from .utils import user_creation, send_email_confirmation
class RegistrationView(FormView):
template_name = 'account/registration.html'
form_class = SignUpForm
def get_context_data(self, **kwargs: Any) -> Dict[str, Any]:
context = super().get_context_data(**kwargs)
context['temporary_registration'] = settings.TEMPORARY_ACCOUNTS_DATE_LIMIT >= date.today()
return context
def form_valid(self, form):
user_creation(form, self.request)
return redirect(reverse('home:home'))
class TemporaryRegistrationView(FormView):
form_class = TemporaryRequestSignUpForm
template_name = 'account/temporary_registration.html'
def dispatch(self, request, *args: Any, **kwargs: Any):
"""Do not allow to use this view outside of allowed temporary accounts windows."""
if not settings.TEMPORARY_ACCOUNTS_DATE_LIMIT >= date.today():
return redirect(reverse('account:registration'))
return super().dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs: Any) -> Dict[str, Any]:
context = super().get_context_data(**kwargs)
context['DEADLINE_TEMPORARY_REGISTRATION'] = settings.TEMPORARY_ACCOUNTS_DATE_LIMIT
return context
def form_valid(self, form) -> HttpResponse:
user_creation(form, self.request)
return redirect(reverse('home:home'))
class ConfirmUser(View):
def get(self, request, uidb64, token):
tempAccessReq: Union[TemporaryAccessRequest, None] = None
try:
uid = force_text(urlsafe_base64_decode(uidb64))
user = User.objects.get(pk=uid)
except (TypeError, ValueError, OverflowError, User.DoesNotExist):
return render(self.request, 'account/activation_invalid.html')
# checking if the user is not a temporary one
try:
tempAccessReq: TemporaryAccessRequest = TemporaryAccessRequest.objects.get(
user=user.pk)
if not tempAccessReq.approved:
return render(self.request, 'account/activation_invalid.html')
except TemporaryAccessRequest.DoesNotExist:
tempAccessReq = None
# checking if the token is valid.
if account_activation_token.check_token(user, token):
# if valid set active true
user.is_active = True
if tempAccessReq is not None:
user.email = tempAccessReq.final_email
tempAccessReq.delete()
messages.warning(
request, f'Dorénavant vous devez utiliser {user.email} pour vous connecter.')
user.save()
login(self.request, user,
backend='apps.account.emailAuthBackend.EmailBackend')
messages.success(request, 'Votre compte est desormais actif !')
return redirect(reverse('home:home'))
else:
return render(self.request, 'account/activation_invalid.html')
class AuthView(FormView):
template_name = 'account/login.html'
form_class = LoginForm
def get(self, request):
if request.user.is_authenticated:
user = request.user
message = f'Vous etes déjà connecté en tant que {user.first_name.title()}.'
messages.warning(request, message)
return redirect(reverse('home:home'))
else:
return super(AuthView, AuthView).get(self, request)
def form_invalid(self, form):
message = f'Veuillez vous connecter avec votre adresse mail ECN.'
messages.warning(self.request, message)
return redirect(reverse('account:login'))
def form_valid(self, form):
username = form.cleaned_data['email']
password = form.cleaned_data['password']
user = EmailBackend.authenticate(username=username, password=password)
if user is not None:
if user.is_active:
message = f'Bonjour {user.first_name.title()} !'
messages.success(self.request, message)
else:
if settings.TEMPORARY_ACCOUNTS_DATE_LIMIT >= date.today():
# During certain periods allow temporary accounts.
try:
temporaryAccessRequest: TemporaryAccessRequest = TemporaryAccessRequest.objects.get(
user=user
)
if not temporaryAccessRequest.mail_valid:
message = 'Votre compte n\'est pas encore actif.\
Veuillez cliquer sur le lien envoyé par mail pour l\'\
activer.'
messages.error(self.request, message)
return redirect(reverse('account:login'))
if temporaryAccessRequest.approved_until <= date.today():
message = 'Votre compte n\'a pas encore été approuvé.\
On vous prévient par mail dès que c\'est le cas.'
messages.error(self.request, message)
return redirect(reverse('account:login'))
message = f'Votre compte n\'est pas encore définitif.\
Veuillez le valider <a href="{reverse("account:upgrade-permanent")}">ici</a>.\
Attention après le {temporaryAccessRequest.approved_until}\
vous ne pourrez plus vous connecter si vous n\'avez pas renseigné votre adresse Centrale.'
messages.warning(self.request, message)
except TemporaryAccessRequest.DoesNotExist:
messages.error(
self.request, 'Identifiant inconnu ou mot de passe invalide.')
return redirect(reverse('account:login'))
else:
messages.warning(
self.request, 'Votre compte n\'est pas encore actif. Veuillez cliquer sur le lien dans \'email.')
login(self.request, user,
backend='apps.account.emailAuthBackend.EmailBackend')
return redirect(reverse('home:home'))
else:
messages.error(
self.request, 'Identifiant inconnu ou mot de passe invalide.')
return redirect(reverse('account:login'))
class LogoutView(View):
def get(self, request):
logout(request)
messages.success(request, 'Vous avez été déconnecté.')
return redirect(reverse('account:login'))
class ForgottenPassView(FormView):
form_class = ForgottenPassForm
template_name = 'account/forgotten_pass.html'
def form_valid(self, form):
user = User.objects.get(email=form.cleaned_data['email'])
if user is not None:
subject = '[Nantral Platform] Reinitialisation de votre mot de passe'
current_site = get_current_site(self.request)
message = render_to_string('account/mail/password_request.html', {
'user': user,
'domain': current_site.domain,
'uidb64': urlsafe_base64_encode(force_bytes(user.pk)),
# method will generate a hash value with user related data
'token': account_activation_token.make_token(user),
})
user.email_user(
subject, message, '[email protected]', html_message=message)
messages.success(
self.request, 'Un email de récuperation a été envoyé si cette adresse existe.')
return redirect(reverse('account:login'))
class PasswordResetConfirmCustomView(PasswordResetConfirmView):
template_name = 'account/reset_password.html'
post_reset_login = True
post_reset_login_backend = 'apps.account.emailAuthBackend.EmailBackend'
form_class = SetPasswordForm
token_generator = account_activation_token
success_url = reverse_lazy('home:home')
def redirect_to_student(request, user_id):
user = User.objects.get(id=user_id)
student = Student.objects.get(user=user)
return redirect('student:update', student.pk)
class ABCApprovalTemporaryResgistrationView(UserIsSuperAdmin, View):
def get(self, request, id):
self.temp_req: TemporaryAccessRequest = get_object_or_404(
TemporaryAccessRequest, id=id)
if self.temp_req.approved:
messages.warning(request, f'Cette requête a déjà été approuvée.')
return redirect(reverse('home:home'))
class ApproveTemporaryRegistrationView(ABCApprovalTemporaryResgistrationView):
def get(self, request, id):
super().get(request, id)
self.temp_req.approve()
messages.success(
request, f'Vous avez accepté la demande de {self.temp_req.user.first_name} {self.temp_req.user.last_name}')
return redirect(reverse('home:home'))
class DenyTemporaryRegistrationView(ABCApprovalTemporaryResgistrationView):
def get(self, request, id):
super().get(request, id)
messages.success(
request, f'Vous avez refusé la demande de {self.temp_req.user.first_name} {self.temp_req.user.last_name}')
self.temp_req.deny()
return redirect(reverse('home:home'))
class ConfirmUserTemporary(View):
def get(self, request, uidb64, token):
try:
uid = force_text(urlsafe_base64_decode(uidb64))
user = User.objects.get(pk=uid)
except (TypeError, ValueError, OverflowError, User.DoesNotExist):
user = None
# checking if the user exists, if the token is valid.
if user is not None and account_activation_token.check_token(user, token):
try:
temp_req: TemporaryAccessRequest = TemporaryAccessRequest.objects.get(
user=user)
temp_req.mail_valid = True
temp_req.save()
except TemporaryAccessRequest.DoesNotExist:
return render(self.request, 'account/activation_invalid.html')
messages.success(request, 'Votre addresse mail est confirmé! \n\
Comme vous n\'avez pas utilisé votre adresse Centrale, vous devez encore attendre qu\'un administrateur vérifie votre inscription.\n\
On vous prévient par mail dès que c\'est bon!. ')
return redirect(reverse('home:home'))
else:
return render(self.request, 'account/activation_invalid.html')
class PermanentAccountUpgradeView(LoginRequiredMixin, FormView):
form_class = UpgradePermanentAccountForm
template_name = 'account/permanent_account_upgrade.html'
success_url = reverse_lazy('home:home')
def get(self, request):
get_object_or_404(
TemporaryAccessRequest,
user=self.request.user
)
return super().get(request)
def form_valid(self, form: UpgradePermanentAccountForm) -> HttpResponse:
temp_request = get_object_or_404(
TemporaryAccessRequest,
user=self.request.user
)
temp_request.final_email = form.cleaned_data['email']
temp_request.save()
send_email_confirmation(
self.request.user, self.request, send_to=form.cleaned_data['email'])
return super().form_valid(form)
<|code_end|>
|
server/apps/account/urls.py
<|code_start|>from django.conf.urls import url
from django.urls import path
from .views import *
app_name = 'account'
urlpatterns = [
path('login', AuthView.as_view(), name='login'),
path('logout', LogoutView.as_view(), name='logout'),
path('registration', RegistrationView.as_view(), name='registration'),
path('registration/temporary/<int:id>/approve', ApproveTemporaryRegistrationView.as_view(),
name='temp-req-approve'),
path('registration/temporary/<int:id>/deny', DenyTemporaryRegistrationView.as_view(),
name='temp-req-deny'),
path('registration/temporary', TemporaryRegistrationView.as_view(),
name='temporary-registration'),
path('activate/<slug:uidb64>/<slug:token>/',
ConfirmUser.as_view(), name='confirm'),
path('activate/<slug:uidb64>/<slug:token>/temporary',
ConfirmUserTemporary.as_view(), name='confirm-temporary'),
path('permanent', PermanentAccountUpgradeView.as_view(),
name='upgrade-permanent'),
path('forgotten', ForgottenPassView.as_view(), name='forgotten_pass'),
path('reset_pass/<slug:uidb64>/<slug:token>/',
PasswordResetConfirmCustomView.as_view(), name='reset_pass'),
path('<slug:user_id>/student', redirect_to_student, name='redirect-student'),
]
<|code_end|>
server/apps/account/views.py
<|code_start|>from datetime import date
from typing import Any, Dict, Union
from django.conf import settings
from django.contrib.auth import login, logout
from django.contrib.sites.shortcuts import get_current_site
from django.http.response import HttpResponse
from django.views.generic.edit import FormView
from django.shortcuts import get_object_or_404
from apps.utils.accessMixins import UserIsSuperAdmin
from .forms import SignUpForm, LoginForm, ForgottenPassForm, TemporaryRequestSignUpForm, UpgradePermanentAccountForm
from .tokens import account_activation_token
from django.contrib import messages
from django.shortcuts import render, redirect
from django.template.loader import render_to_string
from django.utils.http import urlsafe_base64_decode, urlsafe_base64_encode
from django.utils.encoding import force_bytes, force_text
from django.urls import reverse, reverse_lazy
from django.contrib.auth.views import PasswordResetConfirmView
from django.views import View
from django.contrib.auth.forms import SetPasswordForm
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.models import User
from apps.student.models import Student
from .emailAuthBackend import EmailBackend
from .models import TemporaryAccessRequest
from .utils import user_creation, send_email_confirmation
class RegistrationView(FormView):
template_name = 'account/registration.html'
form_class = SignUpForm
def get_context_data(self, **kwargs: Any) -> Dict[str, Any]:
context = super().get_context_data(**kwargs)
context['temporary_registration'] = settings.TEMPORARY_ACCOUNTS_DATE_LIMIT >= date.today()
return context
def form_valid(self, form):
user_creation(form, self.request)
return redirect(reverse('home:home'))
class TemporaryRegistrationView(FormView):
form_class = TemporaryRequestSignUpForm
template_name = 'account/temporary_registration.html'
def dispatch(self, request, *args: Any, **kwargs: Any):
"""Do not allow to use this view outside of allowed temporary accounts windows."""
if not settings.TEMPORARY_ACCOUNTS_DATE_LIMIT >= date.today():
return redirect(reverse('account:registration'))
return super().dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs: Any) -> Dict[str, Any]:
context = super().get_context_data(**kwargs)
context['DEADLINE_TEMPORARY_REGISTRATION'] = settings.TEMPORARY_ACCOUNTS_DATE_LIMIT
return context
def form_valid(self, form) -> HttpResponse:
user_creation(form, self.request)
return redirect(reverse('home:home'))
class ConfirmUser(View):
def get(self, request, uidb64, token):
tempAccessReq: Union[TemporaryAccessRequest, None] = None
try:
uid = force_text(urlsafe_base64_decode(uidb64))
user = User.objects.get(pk=uid)
except (TypeError, ValueError, OverflowError, User.DoesNotExist):
return render(self.request, 'account/activation_invalid.html')
# checking if the user is not a temporary one
try:
tempAccessReq: TemporaryAccessRequest = TemporaryAccessRequest.objects.get(
user=user.pk)
if not tempAccessReq.approved:
return render(self.request, 'account/activation_invalid.html')
except TemporaryAccessRequest.DoesNotExist:
tempAccessReq = None
# checking if the token is valid.
if account_activation_token.check_token(user, token):
# if valid set active true
user.is_active = True
if tempAccessReq is not None:
user.email = tempAccessReq.final_email
tempAccessReq.delete()
messages.warning(
request, f'Dorénavant vous devez utiliser {user.email} pour vous connecter.')
user.save()
login(self.request, user,
backend='apps.account.emailAuthBackend.EmailBackend')
messages.success(request, 'Votre compte est desormais actif !')
return redirect(reverse('home:home'))
else:
return render(self.request, 'account/activation_invalid.html')
class AuthView(FormView):
template_name = 'account/login.html'
form_class = LoginForm
def get(self, request):
if request.user.is_authenticated:
user = request.user
message = f'Vous etes déjà connecté en tant que {user.first_name.title()}.'
messages.warning(request, message)
return redirect(reverse('home:home'))
else:
return super(AuthView, AuthView).get(self, request)
def form_invalid(self, form):
message = f'Veuillez vous connecter avec votre adresse mail ECN.'
messages.warning(self.request, message)
return redirect(reverse('account:login'))
def form_valid(self, form):
username = form.cleaned_data['email']
password = form.cleaned_data['password']
user = EmailBackend.authenticate(username=username, password=password)
if user is not None:
if user.is_active:
message = f'Bonjour {user.first_name.title()} !'
messages.success(self.request, message)
else:
if settings.TEMPORARY_ACCOUNTS_DATE_LIMIT >= date.today():
# During certain periods allow temporary accounts.
try:
temporaryAccessRequest: TemporaryAccessRequest = TemporaryAccessRequest.objects.get(
user=user
)
if not temporaryAccessRequest.mail_valid:
message = 'Votre compte n\'est pas encore actif.\
Veuillez cliquer sur le lien envoyé par mail pour l\'\
activer.'
messages.error(self.request, message)
return redirect(reverse('account:login'))
if temporaryAccessRequest.approved_until <= date.today():
message = 'Votre compte n\'a pas encore été approuvé.\
On vous prévient par mail dès que c\'est le cas.'
messages.error(self.request, message)
return redirect(reverse('account:login'))
message = f'Votre compte n\'est pas encore définitif.\
Veuillez le valider <a href="{reverse("account:upgrade-permanent")}">ici</a>.\
Attention après le {temporaryAccessRequest.approved_until}\
vous ne pourrez plus vous connecter si vous n\'avez pas renseigné votre adresse Centrale.'
messages.warning(self.request, message)
except TemporaryAccessRequest.DoesNotExist:
messages.error(
self.request, 'Identifiant inconnu ou mot de passe invalide.')
return redirect(reverse('account:login'))
else:
messages.warning(
self.request, 'Votre compte n\'est pas encore actif. Veuillez cliquer sur le lien dans \'email.')
login(self.request, user,
backend='apps.account.emailAuthBackend.EmailBackend')
return redirect(reverse('home:home'))
else:
messages.error(
self.request, 'Identifiant inconnu ou mot de passe invalide.')
return redirect(reverse('account:login'))
class LogoutView(View):
def get(self, request):
logout(request)
messages.success(request, 'Vous avez été déconnecté.')
return redirect(reverse('account:login'))
class ForgottenPassView(FormView):
form_class = ForgottenPassForm
template_name = 'account/forgotten_pass.html'
def form_valid(self, form):
try:
user = User.objects.get(email=form.cleaned_data['email'])
if user is not None:
subject = '[Nantral Platform] Reinitialisation de votre mot de passe'
current_site = get_current_site(self.request)
message = render_to_string('account/mail/password_request.html', {
'user': user,
'domain': current_site.domain,
'uidb64': urlsafe_base64_encode(force_bytes(user.pk)),
# method will generate a hash value with user related data
'token': account_activation_token.make_token(user),
})
user.email_user(
subject, message, '[email protected]', html_message=message)
except User.DoesNotExist:
pass
messages.success(
self.request, 'Un email de récuperation a été envoyé si cette adresse existe.')
return redirect(reverse('account:login'))
class PasswordResetConfirmCustomView(PasswordResetConfirmView):
template_name = 'account/reset_password.html'
post_reset_login = True
post_reset_login_backend = 'apps.account.emailAuthBackend.EmailBackend'
form_class = SetPasswordForm
token_generator = account_activation_token
success_url = reverse_lazy('home:home')
def redirect_to_student(request, user_id):
user = User.objects.get(id=user_id)
student = Student.objects.get(user=user)
return redirect('student:update', student.pk)
class ABCApprovalTemporaryResgistrationView(UserIsSuperAdmin, View):
def get(self, request, id):
self.temp_req: TemporaryAccessRequest = get_object_or_404(
TemporaryAccessRequest, id=id)
if self.temp_req.approved:
messages.warning(request, f'Cette requête a déjà été approuvée.')
return redirect(reverse('home:home'))
class ApproveTemporaryRegistrationView(ABCApprovalTemporaryResgistrationView):
def get(self, request, id):
super().get(request, id)
self.temp_req.approve()
messages.success(
request, f'Vous avez accepté la demande de {self.temp_req.user.first_name} {self.temp_req.user.last_name}')
return redirect(reverse('home:home'))
class DenyTemporaryRegistrationView(ABCApprovalTemporaryResgistrationView):
def get(self, request, id):
super().get(request, id)
messages.success(
request, f'Vous avez refusé la demande de {self.temp_req.user.first_name} {self.temp_req.user.last_name}')
self.temp_req.deny()
return redirect(reverse('home:home'))
class ConfirmUserTemporary(View):
def get(self, request, uidb64, token):
try:
uid = force_text(urlsafe_base64_decode(uidb64))
user = User.objects.get(pk=uid)
except (TypeError, ValueError, OverflowError, User.DoesNotExist):
user = None
# checking if the user exists, if the token is valid.
if user is not None and account_activation_token.check_token(user, token):
try:
temp_req: TemporaryAccessRequest = TemporaryAccessRequest.objects.get(
user=user)
temp_req.mail_valid = True
temp_req.save()
except TemporaryAccessRequest.DoesNotExist:
return render(self.request, 'account/activation_invalid.html')
messages.success(request, 'Votre addresse mail est confirmé! \n\
Comme vous n\'avez pas utilisé votre adresse Centrale, vous devez encore attendre qu\'un administrateur vérifie votre inscription.\n\
On vous prévient par mail dès que c\'est bon!. ')
return redirect(reverse('home:home'))
else:
return render(self.request, 'account/activation_invalid.html')
class PermanentAccountUpgradeView(LoginRequiredMixin, FormView):
form_class = UpgradePermanentAccountForm
template_name = 'account/permanent_account_upgrade.html'
success_url = reverse_lazy('home:home')
def get(self, request):
get_object_or_404(
TemporaryAccessRequest,
user=self.request.user
)
return super().get(request)
def form_valid(self, form: UpgradePermanentAccountForm) -> HttpResponse:
temp_request = get_object_or_404(
TemporaryAccessRequest,
user=self.request.user
)
temp_request.final_email = form.cleaned_data['email']
temp_request.save()
send_email_confirmation(
self.request.user, self.request, send_to=form.cleaned_data['email'])
return super().form_valid(form)
<|code_end|>
|
Slow pages
Some pages are a bit slow to load:
- the club list
This may be related to the large number of images; it would be worth looking into caching these images.
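One possible direction, sketched below under assumptions: wrap the rendered club list in a Django template fragment cache and invalidate that fragment whenever a club changes. The fragment name 'club_list', the signal-based invalidation and the surrounding template tag are illustrative choices, not a description of the fix that was actually shipped.
<|code_start|># A minimal sketch (not the project's actual fix): invalidate a cached
# 'club_list' template fragment whenever a Club is saved, so the list page
# can usually be served from the cache. It assumes the list template is
# wrapped in {% load cache %}{% cache 600 club_list %} ... {% endcache %}.
from django.core.cache import cache
from django.core.cache.utils import make_template_fragment_key
from django.db.models.signals import post_save
from django.dispatch import receiver

from apps.club.models import Club  # import path as used elsewhere in this repo


@receiver(post_save, sender=Club)
def invalidate_club_list_cache(sender, instance, **kwargs):
    # Deleting the fragment key forces the next request to re-render the
    # club list with the updated data.
    key = make_template_fragment_key('club_list')
    cache.delete(key)
<|code_end|>
The club/models.py shown further down in this document takes a similar route, deleting the 'club_list' fragment key directly in Club.save().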
|
server/apps/club/views.py
<|code_start|>from django.views.generic import ListView, TemplateView
from django.contrib.auth.mixins import LoginRequiredMixin
from django.urls import resolve
from apps.club.models import Club, BDX
from apps.group.models import Group
from apps.group.views import BaseDetailGroupView
from apps.utils.slug import *
class ListClubView(TemplateView):
template_name = 'club/list.html'
def get_context_data(self, **kwargs):
context = {'club_list': [] }
try:
context['club_list'].append({
'grouper': "Mes Clubs et Assos",
'list': Club.objects.filter(members__user=self.request.user).only('name', 'slug', 'logo', 'bdx_type'),
})
except Exception:
pass
club_list = Club.objects.all().select_related('bdx_type').only('name', 'slug', 'logo', 'bdx_type')
context['club_list'].append({
'grouper': "Associations",
'list': club_list.filter(bdx_type__isnull=True)
})
for bdx in BDX.objects.all():
context['club_list'].append({
'grouper': f'Clubs {bdx.name}',
'list': club_list.filter(bdx_type=bdx),
})
return context
class DetailClubView(BaseDetailGroupView):
'''Vue de détails d'un club.'''
template_name='club/detail.html'
class DetailGroupMembersView(LoginRequiredMixin, ListView):
template_name = 'club/members.html'
def get_object(self, **kwargs):
app = resolve(self.request.path).app_name
slug = self.kwargs.get("slug")
return get_object_from_slug(app, slug)
def get_queryset(self, **kwargs):
object = self.get_object()
members = object.members.through.objects.filter(group=object)
return members.order_by('year', 'order')
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['object'] = self.get_object()
return context
<|code_end|>
|
server/apps/club/views.py
<|code_start|>from django.views.generic import ListView, TemplateView
from django.contrib.auth.mixins import LoginRequiredMixin
from django.urls import resolve
from apps.club.models import Club
from apps.group.views import BaseDetailGroupView
from apps.utils.slug import *
class ListClubView(TemplateView):
template_name = 'club/list.html'
def get_context_data(self, **kwargs):
context = {'club_list': {} }
clubList = {}
allMembersClub = Club.objects.filter(members__user=self.request.user).only('name', 'slug', 'logo', 'bdx_type')
for club in allMembersClub:
clubList.setdefault("Mes Clubs et Assos", []).append(club)
allClubs = Club.objects.all().select_related("bdx_type").only('name', 'slug', 'logo', 'bdx_type')
for club in allClubs:
if(club.bdx_type is None):
clubList.setdefault("Associations", []).append(club)
else:
clubList.setdefault(f'Clubs {club.bdx_type.name}', []).append(club)
context['club_list']=clubList
return context
class DetailClubView(BaseDetailGroupView):
'''Vue de détails d'un club.'''
template_name='club/detail.html'
class DetailGroupMembersView(LoginRequiredMixin, ListView):
template_name = 'club/members.html'
def get_object(self, **kwargs):
app = resolve(self.request.path).app_name
slug = self.kwargs.get("slug")
return get_object_from_slug(app, slug)
def get_queryset(self, **kwargs):
object = self.get_object()
members = object.members.through.objects.filter(group=object)
return members.order_by('year', 'order')
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['object'] = self.get_object()
return context
<|code_end|>
|
Problem with the links to suggestion authors
When someone submits a suggestion from the site, the link that should show that person's name does not work.
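For context, a likely culprit visible in the code below is that `get_absolute_url` is interpolated without being called, so the issue body receives a bound-method repr instead of a URL. A minimal, self-contained illustration; the Student class and the example.com domain here are stand-ins, not the project's actual model or site:
<|code_start|># Illustration of the bug: forgetting the parentheses interpolates the bound
# method object instead of the URL string it would return.
class Student:
    def get_absolute_url(self):
        return "/student/42/"


student = Student()

# Broken: the href ends up containing "<bound method Student.get_absolute_url ...>".
broken = f"<a href='https://example.com{student.get_absolute_url}'>author</a>"

# Working: call the method and use a Markdown link, which GitHub issue bodies render.
fixed = f"[Clique pour découvrir qui propose ça.](https://example.com{student.get_absolute_url()})"

print(broken)
print(fixed)
<|code_end|>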
|
server/apps/home/forms.py
<|code_start|>from django import forms
class SuggestionForm(forms.Form):
title = forms.CharField(max_length=50, required=True)
description = forms.CharField(widget=forms.Textarea)
<|code_end|>
server/apps/home/views.py
<|code_start|>from datetime import *
from typing import List
from django.contrib.sites.shortcuts import get_current_site
from django.db.models.query import QuerySet
from django.shortcuts import render, redirect
from django.views.generic import TemplateView, FormView
from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin
from apps.event.models import BaseEvent
from apps.post.models import Post
from apps.utils.github import create_issue
from .forms import SuggestionForm
class HomeView(LoginRequiredMixin, TemplateView):
template_name = 'home/home.html'
def get_context_data(self, **kwargs):
# Call the base implementation first to get a context
context = super().get_context_data(**kwargs)
posts: List[Post] = Post.objects.filter(
publication_date__gte=date.today()-timedelta(days=10)).order_by('-publication_date')
context['posts'] = [
post for post in posts if post.can_view(self.request.user)]
return context
class SuggestionView(LoginRequiredMixin, FormView):
template_name = 'home/suggestions.html'
form_class = SuggestionForm
def form_valid(self, form):
create_issue(
title=form.cleaned_data['title'],
body=f"{form.cleaned_data['description']} <br/> <a href='http://{get_current_site(self.request)}{self.request.user.student.get_absolute_url}'>Clique pour découvrir qui propose ça.</a>"
)
messages.success(
self.request, 'Votre suggestion a été enregistrée merci')
return redirect('home:home')
def handler404(request, *args, **argv):
response = render(request, '404.html', context={}, status=404)
return response
def handler500(request, *args, **argv):
response = render(request, '500.html', context={},
status=500)
return response
def event_sort(events, request):
tri = {}
jours = ["Lundi", "Mardi", "Mercredi",
"Jeudi", "Vendredi", "Samedi", "Dimanche"]
mois = ["Janvier", "Février", "Mars", "Avril", "Mai", "Juin",
"Juillet", "Août", "Septembre", "Octobre", "Novembre", "Décembre"]
for event in events:
if event.date.date() == date.today():
if "Aujourd'hui" in tri:
tri["Aujourd'hui"].append(
(event, event.is_participating(request.user)))
else:
tri["Aujourd'hui"] = list()
tri["Aujourd'hui"].append(
(event, event.is_participating(request.user)))
elif event.date.date() == (date.today()+timedelta(days=1)):
if "Demain" in tri:
tri["Demain"].append(
(event, event.is_participating(request.user)))
else:
tri["Demain"] = list()
tri["Demain"].append(
(event, event.is_participating(request.user)))
else:
written_date = jours[event.date.weekday(
)] + " " + str(event.date.day) + " " + mois[event.date.month-1]
if written_date in tri:
tri[written_date].append(
(event, event.is_participating(request.user)))
else:
tri[written_date] = list()
tri[written_date].append(
(event, event.is_participating(request.user)))
return tri
<|code_end|>
server/apps/utils/github.py
<|code_start|>import requests
from django.conf import settings
def create_issue(title: str, body: str):
issue = {
'title': title,
'body': body
}
resp = requests.post(f'https://api.github.com/repos/{settings.GITHUB_REPO}/issues',
json=issue, auth=(settings.GITHUB_USER, settings.GITHUB_TOKEN))
if resp.status_code != 201:
raise Exception(f'Error while posting issue to Github: {resp.reason}')
return resp.json()['number']
def close_issue(number: int):
"""Function to close an issue in the repo."""
update = {'state': 'closed'}
requests.post(f'https://api.github.com/repos/{settings.GITHUB_REPO}/issues/{number}',
json=update, auth=(settings.GITHUB_USER, settings.GITHUB_TOKEN))
<|code_end|>
|
server/apps/home/forms.py
<|code_start|>from django import forms
TYPE_CHOICES = (
(1, ("Bug")),
(2, ("Suggestion"))
)
class SuggestionForm(forms.Form):
title = forms.CharField(max_length=50, required=True)
description = forms.CharField(widget=forms.Textarea)
suggestionOrBug = forms.ChoiceField(label="Type",
choices=TYPE_CHOICES,
required=True)
<|code_end|>
server/apps/home/views.py
<|code_start|>from datetime import *
from typing import List
from django.contrib.sites.shortcuts import get_current_site
from django.db.models.query import QuerySet
from django.shortcuts import render, redirect
from django.views.generic import TemplateView, FormView
from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin
from apps.event.models import BaseEvent
from apps.post.models import Post
from apps.utils.github import create_issue
from .forms import SuggestionForm
class HomeView(LoginRequiredMixin, TemplateView):
template_name = 'home/home.html'
def get_context_data(self, **kwargs):
# Call the base implementation first to get a context
context = super().get_context_data(**kwargs)
posts: List[Post] = Post.objects.filter(
publication_date__gte=date.today()-timedelta(days=10)).order_by('-publication_date')
context['posts'] = [
post for post in posts if post.can_view(self.request.user)]
return context
class SuggestionView(LoginRequiredMixin, FormView):
template_name = 'home/suggestions.html'
form_class = SuggestionForm
def form_valid(self, form):
create_issue(
title=form.cleaned_data['title'],
body=f"{form.cleaned_data['description']} <br/> [Clique pour découvrir qui propose ça.](http://{get_current_site(self.request)}{self.request.user.student.get_absolute_url()})",
label=form.cleaned_data['suggestionOrBug']
)
messages.success(
self.request, 'Votre suggestion a été enregistrée merci')
return redirect('home:home')
def handler404(request, *args, **argv):
response = render(request, '404.html', context={}, status=404)
return response
def handler500(request, *args, **argv):
response = render(request, '500.html', context={},
status=500)
return response
def event_sort(events, request):
tri = {}
jours = ["Lundi", "Mardi", "Mercredi",
"Jeudi", "Vendredi", "Samedi", "Dimanche"]
mois = ["Janvier", "Février", "Mars", "Avril", "Mai", "Juin",
"Juillet", "Août", "Septembre", "Octobre", "Novembre", "Décembre"]
for event in events:
if event.date.date() == date.today():
if "Aujourd'hui" in tri:
tri["Aujourd'hui"].append(
(event, event.is_participating(request.user)))
else:
tri["Aujourd'hui"] = list()
tri["Aujourd'hui"].append(
(event, event.is_participating(request.user)))
elif event.date.date() == (date.today()+timedelta(days=1)):
if "Demain" in tri:
tri["Demain"].append(
(event, event.is_participating(request.user)))
else:
tri["Demain"] = list()
tri["Demain"].append(
(event, event.is_participating(request.user)))
else:
written_date = jours[event.date.weekday(
)] + " " + str(event.date.day) + " " + mois[event.date.month-1]
if written_date in tri:
tri[written_date].append(
(event, event.is_participating(request.user)))
else:
tri[written_date] = list()
tri[written_date].append(
(event, event.is_participating(request.user)))
return tri
<|code_end|>
server/apps/utils/github.py
<|code_start|>import requests
from django.conf import settings
def create_issue(title: str, body: str, label):
label = "bug" if int(label) == 1 else "suggestion"
issue = {
'title': title,
'body': body,
'labels': [label]
}
resp = requests.post(f'https://api.github.com/repos/{settings.GITHUB_REPO}/issues',
json=issue, auth=(settings.GITHUB_USER, settings.GITHUB_TOKEN))
if resp.status_code != 201:
raise Exception(
f'Error while posting issue to Github: {resp.reason}')
return resp.json()['number']
def close_issue(number: int):
"""Function to close an issue in the repo."""
update = {'state': 'closed'}
requests.post(f'https://api.github.com/repos/{settings.GITHUB_REPO}/issues/{number}',
json=update, auth=(settings.GITHUB_USER, settings.GITHUB_TOKEN))
<|code_end|>
|
[Colocs] Display bug for addresses with a future flatshare
If a flatshare is added with a move-in date in the future, it becomes the latest flatshare associated with that address, and since it is not occupied yet, it disappears from the map.
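A sketch of the kind of date filter that avoids this, assuming the Roommates model's begin_date/end_date fields and housing foreign key as defined in this repo: only a group whose begin_date is already in the past (and whose end_date is unset or still ahead) counts as currently occupying the address. This is simplified; the property actually introduced in the output code below also keeps housings whose groups have no members.
<|code_start|># Sketch: pick the roommates group currently occupying a housing, ignoring
# groups whose begin_date is still in the future (simplified version of the
# current_roommates property added in the output code below).
from django.db.models import Q
from django.utils import timezone

from apps.roommates.models import Roommates  # import path as in this repo


def current_roommates(housing):
    now = timezone.now()
    return (
        Roommates.objects.filter(housing=housing)
        .filter(Q(begin_date__lte=now) & (Q(end_date__gte=now) | Q(end_date=None)))
        .order_by('begin_date')
        .last()
    )
<|code_end|>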
|
server/apps/roommates/api_views.py
<|code_start|>from rest_framework import generics, permissions, status
from rest_framework.response import Response
from rest_framework.views import APIView
from .serializers import HousingLastRoommatesSerializer #HousingSerializer, RoommatesGroupSerializer, RoommatesMemberSerializer
from .models import Housing, NamedMembershipRoommates, Roommates
from apps.student.models import Student
from apps.utils.geocoding import geocode
from django.utils import timezone
from django.db.models import Q
class SearchGeocodingView(APIView):
"""A view to query the external Geocoding service."""
permission_classes = [permissions.IsAuthenticated]
def get(self, request):
return Response(data=geocode(request.GET.get("search_string")))
class HousingView(generics.ListCreateAPIView):
"""API View to get all the housing and their current roommates"""
serializer_class = HousingLastRoommatesSerializer
permission_classes = [permissions.IsAuthenticated]
def get_queryset(self):
now = timezone.now()
query = Housing.objects.filter(
Q(Q(roommates__begin_date__lte=now) & (Q(roommates__end_date__gte=now) | Q(roommates__end_date=None))) | (Q(roommates__members=None))).distinct()
return query
class CheckAddressView(APIView):
"""An API view to wether wether a housing already exists at selected address.
Returns the pk if it does, None otherwise"""
permission_classes = [permissions.IsAuthenticated]
def post(self, request):
query = Housing.objects.filter(address=request.data.get("address"))
data = [{
'pk':housing.pk,
'name': f'{housing.address} - {housing.details} ({housing.last_roommates})'
} for housing in query ]
return Response(data=data)
'''
class HousingView(generics.ListCreateAPIView):
serializer_class = HousingSerializer
permission_classes = [permissions.IsAuthenticated]
def get_queryset(self):
return Housing.objects.all()
class HousingRoommates(generics.ListCreateAPIView):
"""API View to get all the housing and their current roommates"""
serializer_class = HousingSerializer
permission_classes = [permissions.IsAuthenticated]
def get_queryset(self):
now = timezone.now()
query = Housing.objects.filter(
Q(Q(roommates__begin_date__lte=now) & (Q(roommates__end_date__gte=now) | Q(roommates__end_date=None))) | (Q(roommates__members=None))).distinct()
return query
class RoommatesGroupView(generics.ListCreateAPIView):
"""API View to get all the groups of roommates that lived in a house."""
serializer_class = RoommatesGroupSerializer
permission_classes = [permissions.IsAuthenticated]
def get_queryset(self):
return Roommates.objects.filter(housing=self.kwargs['pk'])
def create(self, request, *args, **kwargs):
housing = generics.get_object_or_404(Housing, pk=self.kwargs['pk'])
copy = request.data.copy()
copy['housing'] = housing.pk
serializer = self.get_serializer(
data=copy)
# Due to the fact that the student field in the NamedMembershipRoommates Serializer
# has to be read_only, the student id is passed as an attribute of the serializer
# otherwise it would be cleaned out in the validated data.
serializer.members = [] if not request.data['add_me'] else [
{
'student': request.user.student.id,
'nickname': request.data['nickname'],
}
]
serializer.is_valid(raise_exception=True)
self.perform_create(serializer)
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
class RoommatesMembersView(generics.ListCreateAPIView):
"""API View to list members of a roommates group."""
serializer_class = RoommatesMemberSerializer
permission_classes = [permissions.IsAuthenticated]
def get_queryset(self):
return NamedMembershipRoommates.objects.filter(roommates=self.kwargs['pk'])
def create(self, request, *args, **kwargs):
group = generics.get_object_or_404(
Roommates, id=self.kwargs['pk'])
copy = request.data.copy()
copy['group'] = group.id
student = generics.get_object_or_404(
Student, id=request.data['student'])
serializer = self.get_serializer(data=copy)
serializer.student = student
serializer.is_valid(raise_exception=True)
self.perform_create(serializer)
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
class RoommatesGroupEditView(generics.RetrieveUpdateDestroyAPIView):
"""API View to update or delete a roommates group."""
serializer_class = RoommatesGroupSerializer
permission_classes = [permissions.IsAuthenticated]
def get_queryset(self):
return Roommates.objects.filter(id=self.kwargs['pk'])
class RoommatesMemberView(generics.RetrieveUpdateDestroyAPIView):
"""API View to get a specific membership and update or delete it."""
serializer_class = RoommatesMemberSerializer
permission_classes = [permissions.IsAuthenticated]
def get_queryset(self):
return NamedMembershipRoommates.objects.filter(id=self.kwargs['pk'])
'''<|code_end|>
server/apps/roommates/models.py
<|code_start|>from django.db import models
from datetime import date
from apps.group.models import Group, NamedMembership
from apps.student.models import Student
from apps.utils.geocoding import geocode
class Housing(models.Model):
address = models.CharField(
max_length=250, verbose_name='Adresse')
details = models.CharField(
max_length=100, verbose_name='Complément d\'adresse', null=True, blank=True)
latitude = models.FloatField(null=True, blank=True)
longitude = models.FloatField(null=True, blank=True)
def save(self, *args, **kwargs):
coordinates = geocode(self.address)[0]
if not self.latitude or not self.longitude or abs(self.latitude-coordinates['lat'])>5e-3 or abs(self.longitude-coordinates['long'])>5e-3:
self.latitude = coordinates['lat']
self.longitude = coordinates['long']
super(Housing, self).save(*args, **kwargs)
def __str__(self):
return self.address if self.address else self.id
@property
def last_roommates(self):
last_roommates = Roommates.objects.filter(housing=self).order_by('begin_date').last()
return last_roommates
class Roommates(Group):
name = models.CharField(verbose_name='Nom du groupe',
max_length=100)
begin_date = models.DateField("Date d'emménagement", default=date.today)
end_date = models.DateField("Date de sortie", null=True, blank=True)
housing = models.ForeignKey(
to=Housing, on_delete=models.CASCADE)
members = models.ManyToManyField(
to=Student, through='NamedMembershipRoommates', blank=True)
class Meta:
verbose_name = "coloc"
class NamedMembershipRoommates(NamedMembership):
group = models.ForeignKey(
to=Roommates, on_delete=models.CASCADE)
nickname = models.CharField(
max_length=100, verbose_name='Surnom', blank=True, null=True)
def __str__(self):
if self.nickname:
return f'{self.nickname} ({self.student.name})'
else:
return self.student.name
<|code_end|>
server/apps/roommates/serializers.py
<|code_start|>from rest_framework import serializers
from .models import Housing, NamedMembershipRoommates, Roommates
class HousingLastRoommatesSerializer(serializers.ModelSerializer):
'''Serializer for the Housing Model to display on the map,
with only the last roommates.'''
roommates = serializers.SerializerMethodField()
class Meta:
model = Housing
fields='__all__'
def get_roommates(self, obj):
roommates = Roommates.objects.filter(housing=obj).order_by('begin_date').last()
return RoommatesSerializer(roommates, many=False, context=self._context).data
class RoommatesMemberSerializer(serializers.ModelSerializer):
'''Serializer for a member of roommates'''
#student = StudentSerializer(read_only=True)
name = serializers.SerializerMethodField()
class Meta:
model = NamedMembershipRoommates
fields = ['nickname', 'name']
def get_name(self, obj):
return obj.student.name
class RoommatesSerializer(serializers.ModelSerializer):
'''Serializer for roommates'''
members = serializers.SerializerMethodField() #RoommatesMemberSerializer(read_only=True, many=True)
url = serializers.SerializerMethodField()
class Meta:
model = Roommates
fields = ['name', 'begin_date', 'end_date', 'members', 'url']
def get_members(self, obj):
members = NamedMembershipRoommates.objects.filter(group=obj)
return RoommatesMemberSerializer(members, many=True, context=self._context).data
def get_url(self, obj):
return obj.get_absolute_url
'''
class HousingSerializer(serializers.ModelSerializer):
edit_url = serializers.HyperlinkedIdentityField(
view_name='roommates:update', read_only=True, lookup_field='slug')
url = serializers.HyperlinkedIdentityField(
view_name='roommates:detail', read_only=True, lookup_field='slug')
roommates = serializers.SerializerMethodField()
name = serializers.SerializerMethodField()
class Meta:
model = Housing
fields = '__all__'
def get_roommates(self, obj):
roommates = Roommates.objects.filter(housing=obj)
return RoommatesGroupSerializer(roommates, many=True, context=self._context).data
def get_name(self, obj):
query = Roommates.objects.filter(housing=obj).order_by('begin_date').last()
return query.name if query else "Coloc sans nom"
class RoommatesMemberSerializer(serializers.ModelSerializer):
student = StudentSerializer(read_only=True)
edit_api_url = serializers.HyperlinkedIdentityField(
view_name='roommates_api:roommates-member', read_only=True)
class Meta:
model = NamedMembershipRoommates
fields = '__all__'
def create(self, validated_data):
return NamedMembershipRoommates.objects.create(
student=self.student,
group=validated_data['group']
)
class RoommatesGroupSerializer(serializers.ModelSerializer):
"""A serializer for the roommates group."""
members = serializers.SerializerMethodField()
edit_members_api_url = serializers.HyperlinkedIdentityField(
view_name='roommates_api:roommates-members', read_only=True)
edit_api_url = serializers.HyperlinkedIdentityField(
view_name='roommates_api:roommates-group-edit', read_only=True)
class Meta:
model = Roommates
fields = '__all__'
def get_members(self, obj):
members = NamedMembershipRoommates.objects.filter(group=obj.id)
return RoommatesMemberSerializer(members, many=True, context=self._context).data
def create(self, validated_data):
roommates = Roommates(
name=validated_data['name'],
housing=validated_data['housing'],
begin_date=validated_data['begin_date']
)
roommates.save()
for member in self.members:
NamedMembershipRoommates.objects.create(
student=Student.objects.get(id=member['student']),
group=roommates,
nickname=member['nickname']
)
return roommates
'''<|code_end|>
|
server/apps/roommates/api_views.py
<|code_start|>from rest_framework import generics, permissions, status
from rest_framework.response import Response
from rest_framework.views import APIView
# HousingSerializer, RoommatesGroupSerializer, RoommatesMemberSerializer
from .serializers import HousingLastRoommatesSerializer
from .models import Housing, NamedMembershipRoommates, Roommates
from apps.student.models import Student
from apps.utils.geocoding import geocode
from django.utils import timezone
from django.db.models import Q
class SearchGeocodingView(APIView):
"""A view to query the external Geocoding service."""
permission_classes = [permissions.IsAuthenticated]
def get(self, request):
return Response(data=geocode(request.GET.get("search_string")))
class HousingView(generics.ListCreateAPIView):
"""API View to get all the housing and their current roommates"""
serializer_class = HousingLastRoommatesSerializer
permission_classes = [permissions.IsAuthenticated]
def get_queryset(self):
now = timezone.now()
query = Housing.objects.filter(
Q(Q(roommates__begin_date__lte=now) & (Q(roommates__end_date__gte=now) | Q(roommates__end_date=None))) | (Q(roommates__members=None))).distinct()
return query
class CheckAddressView(APIView):
"""An API view to wether wether a housing already exists at selected address.
Returns the pk if it does, None otherwise"""
permission_classes = [permissions.IsAuthenticated]
def post(self, request):
query = Housing.objects.filter(address=request.data.get("address"))
data = [{
'pk': housing.pk,
'name': f'{housing.address} - {housing.details} ({housing.current_roommates})'
} for housing in query]
return Response(data=data)
'''
class HousingView(generics.ListCreateAPIView):
serializer_class = HousingSerializer
permission_classes = [permissions.IsAuthenticated]
def get_queryset(self):
return Housing.objects.all()
class HousingRoommates(generics.ListCreateAPIView):
"""API View to get all the housing and their current roommates"""
serializer_class = HousingSerializer
permission_classes = [permissions.IsAuthenticated]
def get_queryset(self):
now = timezone.now()
query = Housing.objects.filter(
Q(Q(roommates__begin_date__lte=now) & (Q(roommates__end_date__gte=now) | Q(roommates__end_date=None))) | (Q(roommates__members=None))).distinct()
return query
class RoommatesGroupView(generics.ListCreateAPIView):
"""API View to get all the groups of roommates that lived in a house."""
serializer_class = RoommatesGroupSerializer
permission_classes = [permissions.IsAuthenticated]
def get_queryset(self):
return Roommates.objects.filter(housing=self.kwargs['pk'])
def create(self, request, *args, **kwargs):
housing = generics.get_object_or_404(Housing, pk=self.kwargs['pk'])
copy = request.data.copy()
copy['housing'] = housing.pk
serializer = self.get_serializer(
data=copy)
# Due to the fact that the student field in the NamedMembershipRoommates Serializer
# has to be read_only, the student id is passed as an attribute of the serializer
# otherwise it would be cleaned out in the validated data.
serializer.members = [] if not request.data['add_me'] else [
{
'student': request.user.student.id,
'nickname': request.data['nickname'],
}
]
serializer.is_valid(raise_exception=True)
self.perform_create(serializer)
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
class RoommatesMembersView(generics.ListCreateAPIView):
"""API View to list members of a roommates group."""
serializer_class = RoommatesMemberSerializer
permission_classes = [permissions.IsAuthenticated]
def get_queryset(self):
return NamedMembershipRoommates.objects.filter(roommates=self.kwargs['pk'])
def create(self, request, *args, **kwargs):
group = generics.get_object_or_404(
Roommates, id=self.kwargs['pk'])
copy = request.data.copy()
copy['group'] = group.id
student = generics.get_object_or_404(
Student, id=request.data['student'])
serializer = self.get_serializer(data=copy)
serializer.student = student
serializer.is_valid(raise_exception=True)
self.perform_create(serializer)
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
class RoommatesGroupEditView(generics.RetrieveUpdateDestroyAPIView):
"""API View to update or delete a roommates group."""
serializer_class = RoommatesGroupSerializer
permission_classes = [permissions.IsAuthenticated]
def get_queryset(self):
return Roommates.objects.filter(id=self.kwargs['pk'])
class RoommatesMemberView(generics.RetrieveUpdateDestroyAPIView):
"""API View to get a specific membership and update or delete it."""
serializer_class = RoommatesMemberSerializer
permission_classes = [permissions.IsAuthenticated]
def get_queryset(self):
return NamedMembershipRoommates.objects.filter(id=self.kwargs['pk'])
'''
<|code_end|>
server/apps/roommates/models.py
<|code_start|>from django.db import models
from datetime import date
from apps.group.models import Group, NamedMembership
from apps.student.models import Student
from apps.utils.geocoding import geocode
from django.db.models import Q
from django.utils import timezone
class Housing(models.Model):
address = models.CharField(
max_length=250, verbose_name='Adresse')
details = models.CharField(
max_length=100, verbose_name='Complément d\'adresse', null=True, blank=True)
latitude = models.FloatField(null=True, blank=True)
longitude = models.FloatField(null=True, blank=True)
def save(self, *args, **kwargs):
coordinates = geocode(self.address)[0]
if not self.latitude or not self.longitude or abs(self.latitude-coordinates['lat']) > 5e-3 or abs(self.longitude-coordinates['long']) > 5e-3:
self.latitude = coordinates['lat']
self.longitude = coordinates['long']
super(Housing, self).save(*args, **kwargs)
def __str__(self):
return self.address if self.address else self.id
@property
def current_roommates(self):
now = timezone.now()
return Roommates.objects.filter(Q(housing=self) & (Q(Q(begin_date__lte=now) & (
Q(end_date__gte=now) | Q(end_date=None))) | (Q(members=None)))).order_by('begin_date').last()
class Roommates(Group):
name = models.CharField(verbose_name='Nom du groupe',
max_length=100)
begin_date = models.DateField("Date d'emménagement", default=date.today)
end_date = models.DateField("Date de sortie", null=True, blank=True)
housing = models.ForeignKey(
to=Housing, on_delete=models.CASCADE)
members = models.ManyToManyField(
to=Student, through='NamedMembershipRoommates', blank=True)
class Meta:
verbose_name = "coloc"
class NamedMembershipRoommates(NamedMembership):
group = models.ForeignKey(
to=Roommates, on_delete=models.CASCADE)
nickname = models.CharField(
max_length=100, verbose_name='Surnom', blank=True, null=True)
def __str__(self):
if self.nickname:
return f'{self.nickname} ({self.student.name})'
else:
return self.student.name
<|code_end|>
server/apps/roommates/serializers.py
<|code_start|>from rest_framework import serializers
from .models import Housing, NamedMembershipRoommates, Roommates
from django.db.models import Q
from django.utils import timezone
class HousingLastRoommatesSerializer(serializers.ModelSerializer):
'''Serializer for the Housing Model to display on the map,
with only the last roommates.'''
roommates = serializers.SerializerMethodField()
class Meta:
model = Housing
fields = '__all__'
def get_roommates(self, obj):
return RoommatesSerializer(obj.current_roommates, many=False, context=self._context).data
class RoommatesMemberSerializer(serializers.ModelSerializer):
'''Serializer for a member of roommates'''
#student = StudentSerializer(read_only=True)
name = serializers.SerializerMethodField()
class Meta:
model = NamedMembershipRoommates
fields = ['nickname', 'name']
def get_name(self, obj):
return obj.student.name
class RoommatesSerializer(serializers.ModelSerializer):
'''Serializer for roommates'''
# RoommatesMemberSerializer(read_only=True, many=True)
members = serializers.SerializerMethodField()
url = serializers.SerializerMethodField()
class Meta:
model = Roommates
fields = ['name', 'begin_date', 'end_date', 'members', 'url']
def get_members(self, obj):
members = NamedMembershipRoommates.objects.filter(group=obj)
return RoommatesMemberSerializer(members, many=True, context=self._context).data
def get_url(self, obj):
return obj.get_absolute_url
'''
class HousingSerializer(serializers.ModelSerializer):
edit_url = serializers.HyperlinkedIdentityField(
view_name='roommates:update', read_only=True, lookup_field='slug')
url = serializers.HyperlinkedIdentityField(
view_name='roommates:detail', read_only=True, lookup_field='slug')
roommates = serializers.SerializerMethodField()
name = serializers.SerializerMethodField()
class Meta:
model = Housing
fields = '__all__'
def get_roommates(self, obj):
roommates = Roommates.objects.filter(housing=obj)
return RoommatesGroupSerializer(roommates, many=True, context=self._context).data
def get_name(self, obj):
query = Roommates.objects.filter(housing=obj).order_by('begin_date').last()
return query.name if query else "Coloc sans nom"
class RoommatesMemberSerializer(serializers.ModelSerializer):
student = StudentSerializer(read_only=True)
edit_api_url = serializers.HyperlinkedIdentityField(
view_name='roommates_api:roommates-member', read_only=True)
class Meta:
model = NamedMembershipRoommates
fields = '__all__'
def create(self, validated_data):
return NamedMembershipRoommates.objects.create(
student=self.student,
group=validated_data['group']
)
class RoommatesGroupSerializer(serializers.ModelSerializer):
"""A serializer for the roommates group."""
members = serializers.SerializerMethodField()
edit_members_api_url = serializers.HyperlinkedIdentityField(
view_name='roommates_api:roommates-members', read_only=True)
edit_api_url = serializers.HyperlinkedIdentityField(
view_name='roommates_api:roommates-group-edit', read_only=True)
class Meta:
model = Roommates
fields = '__all__'
def get_members(self, obj):
members = NamedMembershipRoommates.objects.filter(group=obj.id)
return RoommatesMemberSerializer(members, many=True, context=self._context).data
def create(self, validated_data):
roommates = Roommates(
name=validated_data['name'],
housing=validated_data['housing'],
begin_date=validated_data['begin_date']
)
roommates.save()
for member in self.members:
NamedMembershipRoommates.objects.create(
student=Student.objects.get(id=member['student']),
group=roommates,
nickname=member['nickname']
)
return roommates
'''
<|code_end|>
|
Colocathlon
Preparation of feature support for the Colocathlon, at the BDE's request. Requested features:
- [x] Let each flatshare fill in its opening hours and the activities it offers, and whether or not it takes part
- [x] Display this information to everyone on the flatshare's page
- [x] Let any logged-in user sign up for one and only one flatshare, on a first-come-first-served ("shotgun") basis (see the sketch after this list)
- [x] Let flatshare members see the registered users
- [x] Add a "Colocathlon" mode on the map to display only participating flatshares
- [x] In Colocathlon mode, show the offered activities and hours in the map tooltips instead of the descriptions and members
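A minimal sketch of the shotgun rule, using the colocathlon_agree, colocathlon_quota and colocathlon_participants names that appear in the roommates API view shown later in this record; the one-flatshare-per-user constraint would need an extra check and is left out here:
<|code_start|># Sketch of the "shotgun" sign-up: a student can join a participating
# flatshare only while its quota is not reached.
def try_register(roommates, student):
    if not roommates.colocathlon_agree:
        return False  # this flatshare does not take part in the Colocathlon
    if roommates.colocathlon_participants.count() >= roommates.colocathlon_quota:
        return False  # quota already reached
    roommates.colocathlon_participants.add(student)
    return True
<|code_end|>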
|
server/apps/group/models.py
<|code_start|>from django.db import models
from django.contrib.auth.models import User
from django.utils.text import slugify
from django.urls.base import reverse
from django.template.loader import render_to_string
from django.utils import timezone
from django_ckeditor_5.fields import CKEditor5Field
from apps.student.models import Student
from apps.utils.upload import PathAndRename
from apps.utils.compress import compressModelImage
from apps.utils.slug import get_object_from_full_slug, get_tuple_from_full_slug, SlugModel
from django.conf import settings
from discord_webhook import DiscordWebhook, DiscordEmbed
import logging
logger = logging.getLogger(__name__)
path_and_rename_group = PathAndRename("groups/logo")
path_and_rename_group_banniere = PathAndRename("groups/banniere")
class Group(models.Model, SlugModel):
'''Modèle abstrait servant de modèle pour tous les types de Groupes.'''
# Nom du groupe
name = models.CharField(verbose_name='Nom du groupe',
unique=True, max_length=100)
alt_name = models.CharField(
verbose_name='Nom alternatif', max_length=100, null=True, blank=True)
# présentation
logo = models.ImageField(
verbose_name='Logo du groupe', blank=True, null=True,
upload_to=path_and_rename_group,
help_text="Votre logo sera affiché au format 306x306 pixels.")
banniere = models.ImageField(
verbose_name='Bannière', blank=True, null=True,
upload_to=path_and_rename_group_banniere,
help_text="Votre bannière sera affichée au format 1320x492 pixels.")
summary = models.CharField('Résumé', max_length=500, null=True, blank=True)
description = CKEditor5Field(
verbose_name='Description du groupe', blank=True)
video1 = models.URLField(
'Lien vidéo 1', max_length=200, null=True, blank=True)
video2 = models.URLField(
'Lien vidéo 2', max_length=200, null=True, blank=True)
# paramètres techniques
members = models.ManyToManyField(
Student, verbose_name='Membres du groupe', related_name='%(class)s_members', through='NamedMembership')
slug = models.SlugField(max_length=40, unique=True, blank=True)
modified_date = models.DateTimeField(auto_now=True)
class Meta:
abstract = True
def __str__(self):
return self.name
def is_admin(self, user: User) -> bool:
"""Indicates if a user is admin."""
if user.is_anonymous or not user.is_authenticated or not hasattr(user, 'student'):
return False
student = Student.objects.filter(user=user).first()
if user.is_superuser or user.is_staff:
return True
if self.is_member(user):
members_list = self.members.through.objects.filter(group=self)
my_member = members_list.filter(student=student).first()
return my_member.admin
return False
def is_member(self, user: User) -> bool:
"""Indicates if a user is member."""
if user.is_anonymous or not user.is_authenticated or not hasattr(user, 'student'):
return False
return user.student in self.members.all()
def save(self, *args, **kwargs):
        # création du slug si non-existant ou corrompu
self.set_slug(self.name, 40)
# compression des images
self.logo = compressModelImage(
self, 'logo', size=(500, 500), contains=True)
self.banniere = compressModelImage(
self, 'banniere', size=(1320, 492), contains=False)
# enregistrement
super(Group, self).save(*args, **kwargs)
@property
def app(self):
return self._meta.app_label
@property
def full_slug(self):
return f'{self.app}--{self.slug}'
# Don't make this a property, Django expects it to be a method.
# Making it a property can cause a 500 error (see issue #553).
def get_absolute_url(self):
return reverse(self.app+':detail', kwargs={'slug': self.slug})
@property
def modelName(self):
'''Plural Model name, used in templates'''
return self.__class__._meta.verbose_name_plural
class NamedMembership(models.Model):
admin = models.BooleanField(default=False)
student = models.ForeignKey(to=Student, on_delete=models.CASCADE)
group = models.ForeignKey(to=Group, on_delete=models.CASCADE)
class Meta:
abstract = True
def __str__(self):
return self.student.__str__()
class AdminRightsRequest(models.Model):
"""A model to request admin rights on a group."""
group = models.SlugField(verbose_name="Groupe demandé.")
student = models.ForeignKey(to=Student, on_delete=models.CASCADE)
date = models.DateField(
verbose_name="Date de la requête", default=timezone.now)
reason = models.CharField(
max_length=100, verbose_name="Raison de la demande", blank=True)
domain = models.CharField(max_length=64)
def save(self, domain: str, *args, **kwargs):
self.date = timezone.now()
self.domain = domain
super(AdminRightsRequest, self).save()
group = get_object_from_full_slug(self.group)
try:
webhook = DiscordWebhook(
url=settings.DISCORD_ADMIN_MODERATION_WEBHOOK)
embed = DiscordEmbed(title=f'{self.student} demande à devenir admin de {group}',
description=self.reason,
color=242424)
embed.add_embed_field(
name='Accepter', value=f"[Accepter]({self.accept_url})", inline=True)
embed.add_embed_field(
name='Refuser', value=f"[Refuser]({self.deny_url})", inline=True)
if(self.student.picture):
embed.thumbnail = {"url": self.student.picture.url}
webhook.add_embed(embed)
webhook.execute()
except Exception as e:
logger.error(e)
super(AdminRightsRequest, self).save()
@ property
def accept_url(self):
app, slug = get_tuple_from_full_slug(self.group)
return f"http://{self.domain}{reverse(app+':accept-admin-req', kwargs={'slug': slug,'id': self.id})}"
@ property
def deny_url(self):
app, slug = get_tuple_from_full_slug(self.group)
return f"http://{self.domain}{reverse(app+':deny-admin-req', kwargs={'slug': slug, 'id': self.id})}"
def accept(self):
group = get_object_from_full_slug(self.group)
if group.is_member(self.student.user):
membership = group.members.through.objects.get(
student=self.student.id, group=group)
membership.admin = True
membership.save()
else:
group.members.through.objects.create(
student=self.student,
group=group,
admin=True
)
mail = render_to_string('group/mail/new_admin.html', {
'group': group,
'user': self.student.user
})
self.student.user.email_user(f'Vous êtes admin de {group}', mail,
'[email protected]', html_message=mail)
webhook = DiscordWebhook(
url=settings.DISCORD_ADMIN_MODERATION_WEBHOOK)
embed = DiscordEmbed(title=f'La demande de {self.student} pour rejoindre {group} a été acceptée.',
description="",
color=00000)
webhook.add_embed(embed)
webhook.execute()
self.delete()
def deny(self):
group = get_object_from_full_slug(self.group)
webhook = DiscordWebhook(
url=settings.DISCORD_ADMIN_MODERATION_WEBHOOK)
embed = DiscordEmbed(title=f'La demande de {self.student} pour rejoindre {group} a été refusée.',
description="",
color=00000)
webhook.add_embed(embed)
webhook.execute()
self.delete()
# FIXME Broken since the move of admins inside of members, nice to fix
# @receiver(m2m_changed, sender=Group.members.through)
# def admins_changed(sender, instance, action, pk_set, reverse, model, **kwargs):
# if isinstance(instance, Group):
# # FIXME temporary fix because this signal shotguns m2m_changed which other can't
# # use. To avoid this we check the instance before to make sure it's a group.
# if action == "post_add":
# for pk in pk_set:
# user = User.objects.get(pk=pk)
# mail = render_to_string('group/mail/new_admin.html', {
# 'group': instance,
# 'user': user
# })
# user.email_user(f'Vous êtes admin de {instance}', mail,
# '[email protected]', html_message=mail)
# elif action == "post_remove":
# for pk in pk_set:
# user = User.objects.get(pk=pk)
# mail = render_to_string('group/mail/remove_admin.html', {
# 'group': instance,
# 'user': user
# })
# user.email_user(
# f'Vous n\'êtes plus membre de {instance}', mail, '[email protected]', html_message=mail)
<|code_end|>
server/apps/roommates/api_urls.py
<|code_start|>from django.urls import path
from .api_views import *
app_name = 'roommates_api'
urlpatterns = [
path('geocoding/', SearchGeocodingView.as_view(), name='geocoding'),
path('housing', HousingView.as_view(), name='housing'),
path('housing/check', CheckAddressView.as_view(), name='address-check'),
]
<|code_end|>
server/apps/roommates/api_views.py
<|code_start|>from rest_framework import generics, permissions
from rest_framework.response import Response
from rest_framework.views import APIView
from .serializers import HousingLastRoommatesSerializer
from .models import Housing
from apps.utils.geocoding import geocode
from django.utils import timezone
from django.db.models import Q
class SearchGeocodingView(APIView):
"""A view to query the external Geocoding service."""
permission_classes = [permissions.IsAuthenticated]
def get(self, request):
return Response(data=geocode(request.GET.get("search_string")))
class HousingView(generics.ListCreateAPIView):
"""API View to get all the housing and their current roommates"""
serializer_class = HousingLastRoommatesSerializer
permission_classes = [permissions.IsAuthenticated]
def get_queryset(self):
colocathlonParticipants = int(self.request.GET.get(
'colocathlonParticipants'))
now = timezone.now()
if colocathlonParticipants == 1:
query = Housing.objects.filter((
Q(Q(roommates__begin_date__lte=now) & (Q(roommates__end_date__gte=now) | Q(roommates__end_date=None))) | Q(roommates__members=None)) & Q(roommates__colocathlon_agree=True)).distinct()
return query
query = Housing.objects.filter(
Q(Q(roommates__begin_date__lte=now) & (Q(roommates__end_date__gte=now) | Q(roommates__end_date=None))) | Q(roommates__members=None)).distinct()
return query
class CheckAddressView(APIView):
"""An API view to check whether a housing already exists at the selected address.
Returns the pk if it does, None otherwise"""
permission_classes = [permissions.IsAuthenticated]
def post(self, request):
query = Housing.objects.filter(address=request.data.get("address"))
data = [{
'pk': housing.pk,
'name': f'{housing.address} - {housing.details} ({housing.current_roommates})'
} for housing in query]
return Response(data=data)
<|code_end|>
|
server/apps/group/models.py
<|code_start|>from django.db import models
from django.contrib.auth.models import User
from django.utils.text import slugify
from django.urls.base import reverse
from django.template.loader import render_to_string
from django.utils import timezone
from django_ckeditor_5.fields import CKEditor5Field
from apps.student.models import Student
from apps.utils.upload import PathAndRename
from apps.utils.compress import compressModelImage
from apps.utils.slug import get_object_from_full_slug, get_tuple_from_full_slug, SlugModel
from django.conf import settings
from discord_webhook import DiscordWebhook, DiscordEmbed
import logging
logger = logging.getLogger(__name__)
path_and_rename_group = PathAndRename("groups/logo")
path_and_rename_group_banniere = PathAndRename("groups/banniere")
class Group(models.Model, SlugModel):
'''Modèle abstrait servant de modèle pour tous les types de Groupes.'''
# Nom du groupe
name = models.CharField(verbose_name='Nom du groupe',
unique=True, max_length=100)
alt_name = models.CharField(
verbose_name='Nom alternatif', max_length=100, null=True, blank=True)
# présentation
logo = models.ImageField(
verbose_name='Logo du groupe', blank=True, null=True,
upload_to=path_and_rename_group,
help_text="Votre logo sera affiché au format 306x306 pixels.")
banniere = models.ImageField(
verbose_name='Bannière', blank=True, null=True,
upload_to=path_and_rename_group_banniere,
help_text="Votre bannière sera affichée au format 1320x492 pixels.")
summary = models.CharField('Résumé', max_length=500, null=True, blank=True)
description = CKEditor5Field(
verbose_name='Description du groupe', blank=True)
video1 = models.URLField(
'Lien vidéo 1', max_length=200, null=True, blank=True)
video2 = models.URLField(
'Lien vidéo 2', max_length=200, null=True, blank=True)
# paramètres techniques
members = models.ManyToManyField(
Student, verbose_name='Membres du groupe', related_name='%(class)s_members', through='NamedMembership')
slug = models.SlugField(max_length=40, unique=True, blank=True)
modified_date = models.DateTimeField(auto_now=True)
class Meta:
abstract = True
def __str__(self):
return self.name
def is_admin(self, user: User) -> bool:
"""Indicates if a user is admin."""
if user.is_anonymous or not user.is_authenticated or not hasattr(user, 'student'):
return False
student = Student.objects.filter(user=user).first()
if user.is_superuser:
return True
if self.is_member(user):
members_list = self.members.through.objects.filter(group=self)
my_member = members_list.filter(student=student).first()
return my_member.admin
return False
def is_member(self, user: User) -> bool:
"""Indicates if a user is member."""
if user.is_anonymous or not user.is_authenticated or not hasattr(user, 'student'):
return False
return user.student in self.members.all()
def save(self, *args, **kwargs):
        # création du slug si non-existant ou corrompu
self.set_slug(self.name, 40)
# compression des images
self.logo = compressModelImage(
self, 'logo', size=(500, 500), contains=True)
self.banniere = compressModelImage(
self, 'banniere', size=(1320, 492), contains=False)
# enregistrement
super(Group, self).save(*args, **kwargs)
@property
def app(self):
return self._meta.app_label
@property
def full_slug(self):
return f'{self.app}--{self.slug}'
# Don't make this a property, Django expects it to be a method.
# Making it a property can cause a 500 error (see issue #553).
def get_absolute_url(self):
return reverse(self.app+':detail', kwargs={'slug': self.slug})
@property
def modelName(self):
'''Plural Model name, used in templates'''
return self.__class__._meta.verbose_name_plural
class NamedMembership(models.Model):
admin = models.BooleanField(default=False)
student = models.ForeignKey(to=Student, on_delete=models.CASCADE)
group = models.ForeignKey(to=Group, on_delete=models.CASCADE)
class Meta:
abstract = True
def __str__(self):
return self.student.__str__()
class AdminRightsRequest(models.Model):
"""A model to request admin rights on a group."""
group = models.SlugField(verbose_name="Groupe demandé.")
student = models.ForeignKey(to=Student, on_delete=models.CASCADE)
date = models.DateField(
verbose_name="Date de la requête", default=timezone.now)
reason = models.CharField(
max_length=100, verbose_name="Raison de la demande", blank=True)
domain = models.CharField(max_length=64)
def save(self, domain: str, *args, **kwargs):
self.date = timezone.now()
self.domain = domain
super(AdminRightsRequest, self).save()
group = get_object_from_full_slug(self.group)
try:
webhook = DiscordWebhook(
url=settings.DISCORD_ADMIN_MODERATION_WEBHOOK)
embed = DiscordEmbed(title=f'{self.student} demande à devenir admin de {group}',
description=self.reason,
color=242424)
embed.add_embed_field(
name='Accepter', value=f"[Accepter]({self.accept_url})", inline=True)
embed.add_embed_field(
name='Refuser', value=f"[Refuser]({self.deny_url})", inline=True)
if(self.student.picture):
embed.thumbnail = {"url": self.student.picture.url}
webhook.add_embed(embed)
webhook.execute()
except Exception as e:
logger.error(e)
super(AdminRightsRequest, self).save()
@ property
def accept_url(self):
app, slug = get_tuple_from_full_slug(self.group)
return f"http://{self.domain}{reverse(app+':accept-admin-req', kwargs={'slug': slug,'id': self.id})}"
@ property
def deny_url(self):
app, slug = get_tuple_from_full_slug(self.group)
return f"http://{self.domain}{reverse(app+':deny-admin-req', kwargs={'slug': slug, 'id': self.id})}"
def accept(self):
group = get_object_from_full_slug(self.group)
if group.is_member(self.student.user):
membership = group.members.through.objects.get(
student=self.student.id, group=group)
membership.admin = True
membership.save()
else:
group.members.through.objects.create(
student=self.student,
group=group,
admin=True
)
mail = render_to_string('group/mail/new_admin.html', {
'group': group,
'user': self.student.user
})
self.student.user.email_user(f'Vous êtes admin de {group}', mail,
'[email protected]', html_message=mail)
webhook = DiscordWebhook(
url=settings.DISCORD_ADMIN_MODERATION_WEBHOOK)
embed = DiscordEmbed(title=f'La demande de {self.student} pour rejoindre {group} a été acceptée.',
description="",
color=00000)
webhook.add_embed(embed)
webhook.execute()
self.delete()
def deny(self):
group = get_object_from_full_slug(self.group)
webhook = DiscordWebhook(
url=settings.DISCORD_ADMIN_MODERATION_WEBHOOK)
embed = DiscordEmbed(title=f'La demande de {self.student} pour rejoindre {group} a été refusée.',
description="",
color=00000)
webhook.add_embed(embed)
webhook.execute()
self.delete()
# FIXME Broken since the move of admins inside of members, nice to fix
# @receiver(m2m_changed, sender=Group.members.through)
# def admins_changed(sender, instance, action, pk_set, reverse, model, **kwargs):
# if isinstance(instance, Group):
# # FIXME temporary fix because this signal shotguns m2m_changed which other can't
# # use. To avoid this we check the instance before to make sure it's a group.
# if action == "post_add":
# for pk in pk_set:
# user = User.objects.get(pk=pk)
# mail = render_to_string('group/mail/new_admin.html', {
# 'group': instance,
# 'user': user
# })
# user.email_user(f'Vous êtes admin de {instance}', mail,
# '[email protected]', html_message=mail)
# elif action == "post_remove":
# for pk in pk_set:
# user = User.objects.get(pk=pk)
# mail = render_to_string('group/mail/remove_admin.html', {
# 'group': instance,
# 'user': user
# })
# user.email_user(
# f'Vous n\'êtes plus membre de {instance}', mail, '[email protected]', html_message=mail)
<|code_end|>
server/apps/roommates/api_urls.py
<|code_start|>from django.urls import path
from .api_views import *
app_name = 'roommates_api'
urlpatterns = [
path('geocoding/', SearchGeocodingView.as_view(), name='geocoding'),
path('housing', HousingView.as_view(), name='housing'),
path('housing/check', CheckAddressView.as_view(), name='address-check'),
path('roommates-details', RoommatesDetails.as_view(), name="roommates-details")
]
<|code_end|>
server/apps/roommates/api_views.py
<|code_start|>from django.http.response import HttpResponse
from rest_framework import generics, permissions
from rest_framework.response import Response
from rest_framework.views import APIView
from .serializers import HousingLastRoommatesSerializer, RoommatesSerializer
from .models import Housing, Roommates
from apps.utils.geocoding import geocode
from django.utils import timezone
from django.db.models import Q
class SearchGeocodingView(APIView):
"""A view to query the external Geocoding service."""
permission_classes = [permissions.IsAuthenticated]
def get(self, request):
return Response(data=geocode(request.GET.get("search_string")))
class HousingView(generics.ListCreateAPIView):
"""API View to get all the housing and their current roommates"""
serializer_class = HousingLastRoommatesSerializer
permission_classes = [permissions.IsAuthenticated]
def get_queryset(self):
colocathlonParticipants = int(self.request.GET.get(
'colocathlonParticipants'))
now = timezone.now()
if colocathlonParticipants == 1:
query = Housing.objects.filter((
Q(Q(roommates__begin_date__lte=now) & (Q(roommates__end_date__gte=now) | Q(roommates__end_date=None))) | Q(roommates__members=None)) & Q(roommates__colocathlon_agree=True)).distinct()
return query
query = Housing.objects.filter(
Q(Q(roommates__begin_date__lte=now) & (Q(roommates__end_date__gte=now) | Q(roommates__end_date=None))) | Q(roommates__members=None)).distinct()
return query
class CheckAddressView(APIView):
"""An API view to check whether a housing already exists at the selected address.
Returns the pk if it does, None otherwise"""
permission_classes = [permissions.IsAuthenticated]
def post(self, request):
query = Housing.objects.filter(address=request.data.get("address"))
data = [{
'pk': housing.pk,
'name': f'{housing.address} - {housing.details} ({housing.current_roommates})'
} for housing in query]
return Response(data=data)
class RoommatesDetails(APIView):
"""An API view to return the details of a roommates instance"""
serializer_class = RoommatesSerializer
permission_classes = [permissions.IsAuthenticated]
def get(self, request):
roommatesSlug = self.request.GET.get(
'slug')
roommates = [generics.get_object_or_404(Roommates, slug=roommatesSlug)]
serializer = self.serializer_class(roommates, many=True)
return Response(serializer.data)
def post(self, request):
roommates = generics.get_object_or_404(
Roommates, slug=request.data.get("slug"))
if not roommates.colocathlon_agree:
return Response(status=403)
addOrDelete = int(request.data.get("addOrDelete"))
# addOrDelete == 1 -> Delete user
# addOrDelete == 0 -> Add user
if addOrDelete == 0:
if roommates.colocathlon_quota > roommates.colocathlon_participants.count():
roommates.colocathlon_participants.add(request.user.student)
return Response(status=200)
return Response(status=403)
roommates.colocathlon_participants.remove(request.user.student)
return Response(status=200)
<|code_end|>
|
Colocathlon
Preparation of feature support for the Colocathlon, at the BDE's request. Requested features:
- [x] Let each flatshare fill in its opening hours and the activities it offers, and whether or not it takes part
- [x] Display this information to everyone on the flatshare's page
- [x] Let any logged-in user sign up for one and only one flatshare, on a first-come-first-served ("shotgun") basis
- [x] Let flatshare members see the registered users
- [x] Add a "Colocathlon" mode on the map to display only participating flatshares (see the sketch after this list)
- [x] In Colocathlon mode, show the offered activities and hours in the map tooltips instead of the descriptions and members
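A sketch of how the map endpoint can toggle between all housings and Colocathlon participants only, following the colocathlonParticipants query parameter used in the roommates API view of the previous record; treating a missing parameter as "show everything" is an assumption added here:
<|code_start|># Sketch: filter housings for the map depending on a ?colocathlonParticipants=1 flag.
from django.db.models import Q
from django.utils import timezone

from apps.roommates.models import Housing  # import path as in this repo


def housings_for_map(request):
    only_participants = request.GET.get('colocathlonParticipants') == '1'
    now = timezone.now()
    occupied = Q(roommates__begin_date__lte=now) & (
        Q(roommates__end_date__gte=now) | Q(roommates__end_date=None))
    queryset = Housing.objects.filter(occupied | Q(roommates__members=None)).distinct()
    if only_participants:
        # Keep only housings whose roommates group agreed to take part.
        queryset = queryset.filter(roommates__colocathlon_agree=True)
    return queryset
<|code_end|>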
|
server/apps/club/api_views.py
<|code_start|>from django.http.response import HttpResponse
from rest_framework import generics, permissions
from rest_framework.response import Response
from django.shortcuts import get_object_or_404
from django.db.models import Q
from django.utils import timezone
from django.utils.dateparse import parse_date
from rest_framework.views import APIView
from .models import Club, NamedMembershipClub
from apps.student.models import Student
from .serializers import *
class ListMyClubAPIView(generics.ListAPIView):
"""List all the clubs of a student."""
serializer_class = ClubSerializer
permission_classes = [permissions.IsAuthenticated]
def get_queryset(self):
if self.request.user.is_authenticated:
allMembersClub = Club.objects.filter(
members__user=self.request.user)
return allMembersClub
class ListClubMembersAPIView(APIView):
"""API endpoint to interact with the members of a club."""
permission_classes = [permissions.IsAuthenticated]
def get(self, request, format=None):
clubSlug = request.query_params.get('slug')
club = get_object_or_404(Club, slug=clubSlug)
date_end = timezone.make_aware(timezone.now().today())
namedMemberships = club.members.through.objects.filter(
Q(group=club) & (Q(date_end__isnull=True) | Q(date_end__gt=date_end))
).order_by('student__user__first_name')
serializer = ClubMemberSerializer(namedMemberships, many=True)
return Response(data=serializer.data)
def post(self, request, *args, **kwargs):
# Check if club's admin
user = self.request.user
student = Student.objects.get(user=user)
clubSlug = request.query_params.get('slug')
club = get_object_or_404(Club, slug=clubSlug)
toCheckIfAdmin = get_object_or_404(
NamedMembershipClub, group=club, student=student)
if not (toCheckIfAdmin.admin or user.is_staff):
return HttpResponse(status=403)
editMode = request.data.get("editMode")
# editMode == 1 -> Edit the order of the members
# editMode == 2 -> Edit a member
# editMode == 3 -> Delete a member
if editMode == 1:
newOrderedMembers = request.data.get("orderedMembers")
for member in newOrderedMembers:
NamedMembershipClub.objects.filter(
id=member.get("id")).update(order=member.get("order"))
return HttpResponse(status=200)
elif editMode == 2:
id = request.data.get("id")
role = request.data.get("role")
beginDate = parse_date(request.data.get("beginDate")) if request.data.get(
"beginDate") is not None else None
endDate = parse_date(request.data.get("endDate")) if request.data.get(
"endDate") is not None else None
admin = request.data.get("admin")
NamedMembershipClub.objects.filter(
id=id).update(function=role, admin=admin, date_begin=beginDate, date_end=endDate)
return HttpResponse(status=200)
elif editMode == 3:
id = request.data.get("id")
NamedMembershipClub.objects.get(id=id).delete()
return HttpResponse(status=200)
<|code_end|>
server/apps/club/models.py
<|code_start|>from django.db import models
from django.db.models import F
from django.utils import timezone
from django.core.cache import cache
from django.core.cache.utils import make_template_fragment_key
from apps.group.models import Group, NamedMembership
from apps.student.models import Student
class Club(Group):
members = models.ManyToManyField(Student, through='NamedMembershipClub')
bdx_type = models.ForeignKey(
'BDX', on_delete=models.SET_NULL, verbose_name='Type de club BDX', null=True, blank=True)
email = models.EmailField("Email de l'asso", max_length=50, null=True, blank=True)
meeting_place = models.CharField("Local / Lieu de réunion", max_length=50, null=True, blank=True)
meeting_hour = models.CharField("Heure et jour de réunion périodique", max_length=50, null=True, blank=True)
class Meta:
ordering = [F('bdx_type').asc(nulls_first=True), 'name']
verbose_name = "club/asso"
verbose_name_plural = "clubs & assos"
def is_admin(self, user) -> bool:
is_admin = super(Club, self).is_admin(user)
if not is_admin and self.bdx_type:
return self.bdx_type.is_admin(user)
else:
return is_admin
def save(self, *args, **kwargs):
# mise à jour du cache de la liste des clubs
key = make_template_fragment_key('club_list')
cache.delete(key)
# enregistrement
super().save(*args, **kwargs)
class BDX(Club):
'''Groupe représentant un BDX.'''
order = models.IntegerField(default=0)
class Meta:
ordering = ['order']
verbose_name_plural = "BDX"
class NamedMembershipClub(NamedMembership):
group = models.ForeignKey(Club, on_delete=models.CASCADE)
function = models.CharField(
verbose_name='Rôle (facultatif)', max_length=200, blank=True)
date_begin = models.DateField(verbose_name='Date de début', default=timezone.now().today)
date_end = models.DateField(verbose_name='Date de fin (facultatif)', blank=True, null=True)
order = models.IntegerField(verbose_name='Hiérarchie', default=0)
@property
def year(self, **kwargs):
'''Renvoie l'année scolaire où l'étudiant est devenu membre.
On renvoie seulement la 2eme année de l'année scolaire.'''
y = self.date_begin.year
m = self.date_begin.month
if m >= 8:
return y + 1
else:
return y<|code_end|>
server/apps/student/api_urls.py
<|code_start|>from django.conf.urls import url
from django.urls import path
from .api_views import *
app_name = 'student'
urlpatterns = [
path('<slug:student_id>/courses/',
StudentCoursesView.as_view(), name='courses'),
path('<slug:student_id>/courses/<slug:pk>',
StudentEditNamedMembershipCourse.as_view(), name='unfollow-course'),
path('', StudentList.as_view(), name='list')
]
<|code_end|>
|
server/apps/club/api_views.py
<|code_start|>from django.http.response import HttpResponse
from rest_framework import generics, permissions
from rest_framework.response import Response
from django.shortcuts import get_object_or_404
from django.db.models import Q
from django.utils import timezone
from django.utils.dateparse import parse_date
from rest_framework.views import APIView
from .models import Club, NamedMembershipClub
from apps.student.models import Student
from .serializers import *
class ListMyClubAPIView(generics.ListAPIView):
"""List all the clubs of a student."""
serializer_class = ClubSerializer
permission_classes = [permissions.IsAuthenticated]
def get_queryset(self):
if self.request.user.is_authenticated:
allMembersClub = Club.objects.filter(
members__user=self.request.user)
return allMembersClub
class ListClubMembersAPIView(APIView):
"""API endpoint to interact with the members of a club."""
permission_classes = [permissions.IsAuthenticated]
def get(self, request, format=None):
clubSlug = request.query_params.get('slug')
club = get_object_or_404(Club, slug=clubSlug)
date_end = timezone.make_aware(timezone.now().today())
namedMemberships = club.members.through.objects.filter(
Q(group=club) & (Q(date_end__isnull=True) | Q(date_end__gt=date_end))
).order_by('student__user__first_name')
serializer = ClubMemberSerializer(namedMemberships, many=True)
return Response(data=serializer.data)
def post(self, request, *args, **kwargs):
# Check if club's admin
user = self.request.user
student = Student.objects.get(user=user)
clubSlug = request.query_params.get('slug')
club = get_object_or_404(Club, slug=clubSlug)
toCheckIfAdmin = get_object_or_404(
NamedMembershipClub, group=club, student=student)
if not (toCheckIfAdmin.admin or user.is_staff):
return HttpResponse(status=403)
editMode = request.data.get("editMode")
# editMode == 1 -> Edit the order of the members
# editMode == 2 -> Edit a member
# editMode == 3 -> Delete a member
# editMode == 4 -> Add a member
if editMode == 1:
newOrderedMembers = request.data.get("orderedMembers")
for member in newOrderedMembers:
NamedMembershipClub.objects.filter(
id=member.get("id")).update(order=member.get("order"))
return HttpResponse(status=200)
elif editMode == 2:
id = request.data.get("id")
role = request.data.get("role")
beginDate = parse_date(request.data.get("beginDate")) if request.data.get(
"beginDate") is not None else None
endDate = parse_date(request.data.get("endDate")) if request.data.get(
"endDate") is not None else None
admin = request.data.get("admin")
NamedMembershipClub.objects.filter(
id=id).update(function=role, admin=admin, date_begin=beginDate, date_end=endDate)
return HttpResponse(status=200)
elif editMode == 3:
id = request.data.get("id")
NamedMembershipClub.objects.get(id=id).delete()
return HttpResponse(status=200)
elif editMode == 4:
studentIDToAdd = request.data.get("id")
studentToAdd = Student.objects.get(id=studentIDToAdd)
# Check if student already exists
if NamedMembershipClub.objects.filter(
student=studentToAdd, group=club).exists():
return HttpResponse(status=403)
admin = request.data.get("admin")
function = request.data.get("function")
beginDate = parse_date(request.data.get("date_begin")) if request.data.get(
"beginDate") is not None else None
endDate = parse_date(request.data.get("date_end")) if request.data.get(
"endDate") is not None else None
# Check if dates are valid
if beginDate is not None and endDate is not None and beginDate > endDate:
return HttpResponse(status=500)
if beginDate is not None:
NamedMembershipClub.objects.create(
group=club,
student=studentToAdd,
admin=admin,
function=function,
date_begin=beginDate,
date_end=endDate).save()
else:
NamedMembershipClub.objects.create(
group=club,
student=studentToAdd,
admin=admin,
function=function).save()
return HttpResponse(status=200)
<|code_end|>
server/apps/club/models.py
<|code_start|>from django.db import models
from django.db.models import F
from django.utils import timezone
from django.core.cache import cache
from django.core.cache.utils import make_template_fragment_key
from apps.group.models import Group, NamedMembership
from apps.student.models import Student
class Club(Group):
members = models.ManyToManyField(Student, through='NamedMembershipClub')
bdx_type = models.ForeignKey(
'BDX', on_delete=models.SET_NULL, verbose_name='Type de club BDX', null=True, blank=True)
email = models.EmailField(
"Email de l'asso", max_length=50, null=True, blank=True)
meeting_place = models.CharField(
"Local / Lieu de réunion", max_length=50, null=True, blank=True)
meeting_hour = models.CharField(
"Heure et jour de réunion périodique", max_length=50, null=True, blank=True)
class Meta:
ordering = [F('bdx_type').asc(nulls_first=True), 'name']
verbose_name = "club/asso"
verbose_name_plural = "clubs & assos"
def is_admin(self, user) -> bool:
is_admin = super(Club, self).is_admin(user)
if not is_admin and self.bdx_type:
return self.bdx_type.is_admin(user)
else:
return is_admin
def save(self, *args, **kwargs):
# mise à jour du cache de la liste des clubs
key = make_template_fragment_key('club_list')
cache.delete(key)
# enregistrement
super().save(*args, **kwargs)
class BDX(Club):
'''Groupe représentant un BDX.'''
order = models.IntegerField(default=0)
class Meta:
ordering = ['order']
verbose_name_plural = "BDX"
class NamedMembershipClub(NamedMembership):
group = models.ForeignKey(Club, on_delete=models.CASCADE)
function = models.CharField(
verbose_name='Rôle (facultatif)', max_length=200, blank=True)
date_begin = models.DateField(
verbose_name='Date de début', default=timezone.now().today)
date_end = models.DateField(
verbose_name='Date de fin (facultatif)', blank=True, null=True)
order = models.IntegerField(verbose_name='Hiérarchie', default=0)
@property
def year(self, **kwargs):
'''Renvoie l'année scolaire où l'étudiant est devenu membre.
On renvoie seulement la 2eme année de l'année scolaire.'''
y = self.date_begin.year
m = self.date_begin.month
if m >= 8:
return y + 1
else:
return y
def get_or_create(self, **kwargs):
defaults = kwargs.pop('defaults', {}) # popping defaults from values
for key, value in kwargs.items():
if value == None:
kwargs[key] = defaults.get(key)
return super(NamedMembershipClub, self).get_or_create(**kwargs)
<|code_end|>
server/apps/student/api_urls.py
<|code_start|>from django.urls import path
from .api_views import *
app_name = 'student'
urlpatterns = [
path('<slug:student_id>/courses/',
StudentCoursesView.as_view(), name='courses'),
path('<slug:student_id>/courses/<slug:pk>',
StudentEditNamedMembershipCourse.as_view(), name='unfollow-course'),
path('', StudentList.as_view(), name='list')
]
<|code_end|>
|
Add vanilla GRU
We want to add a simple vanilla GRU for time series forecasting, possibly with a probabilistic component as well.
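For reference, a minimal sketch of how the VanillaGRU module added in the output below could be exercised directly (all hyperparameter values are illustrative):

```python
# Illustrative only: direct instantiation of VanillaGRU with made-up
# hyperparameters and random data.
import torch
from flood_forecast.basic.gru_vanilla import VanillaGRU

model = VanillaGRU(
    n_time_series=3,   # input features per time step
    hidden_dim=32,
    num_layers=2,
    n_target=1,
    dropout=0.2,
    forecast_length=1,
)
x = torch.rand(4, 20, 3)   # (batch_size, seq_len, n_time_series)
y = model(x)               # shape (4, 1) when forecast_length == 1
print(y.shape)
```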
|
flood_forecast/basic/gru_vanilla.py
<|code_start|><|code_end|>
flood_forecast/model_dict_function.py
<|code_start|>from flood_forecast.transformer_xl.multi_head_base import MultiAttnHeadSimple
from flood_forecast.transformer_xl.transformer_basic import SimpleTransformer, CustomTransformerDecoder
from flood_forecast.transformer_xl.informer import Informer
from flood_forecast.transformer_xl.transformer_xl import TransformerXL
from flood_forecast.transformer_xl.dummy_torch import DummyTorchModel
from flood_forecast.basic.linear_regression import SimpleLinearModel
from flood_forecast.basic.lstm_vanilla import LSTMForecast
from flood_forecast.custom.custom_opt import BertAdam
from torch.optim import Adam, SGD
from torch.nn import MSELoss, SmoothL1Loss, PoissonNLLLoss, L1Loss, CrossEntropyLoss, BCELoss, BCEWithLogitsLoss
from flood_forecast.basic.linear_regression import simple_decode
from flood_forecast.transformer_xl.transformer_basic import greedy_decode
from flood_forecast.custom.focal_loss import FocalLoss
from flood_forecast.da_rnn.model import DARNN
from flood_forecast.custom.custom_opt import (RMSELoss, MAPELoss, PenalizedMSELoss, NegativeLogLikelihood, MASELoss,
GaussianLoss)
from flood_forecast.transformer_xl.transformer_bottleneck import DecoderTransformer
from flood_forecast.custom.dilate_loss import DilateLoss
from flood_forecast.meta_models.basic_ae import AE
from flood_forecast.transformer_xl.dsanet import DSANet
"""
Utility dictionaries to map a string to a class
"""
pytorch_model_dict = {
"MultiAttnHeadSimple": MultiAttnHeadSimple,
"SimpleTransformer": SimpleTransformer,
"TransformerXL": TransformerXL,
"DummyTorchModel": DummyTorchModel,
"LSTM": LSTMForecast,
"SimpleLinearModel": SimpleLinearModel,
"CustomTransformerDecoder": CustomTransformerDecoder,
"DARNN": DARNN,
"DecoderTransformer": DecoderTransformer,
"BasicAE": AE,
"Informer": Informer,
"DSANet": DSANet
}
pytorch_criterion_dict = {
"GaussianLoss": GaussianLoss,
"MASELoss": MASELoss,
"MSE": MSELoss,
"SmoothL1Loss": SmoothL1Loss,
"PoissonNLLLoss": PoissonNLLLoss,
"RMSE": RMSELoss,
"MAPE": MAPELoss,
"DilateLoss": DilateLoss,
"L1": L1Loss,
"PenalizedMSELoss": PenalizedMSELoss,
"CrossEntropyLoss": CrossEntropyLoss,
"NegativeLogLikelihood": NegativeLogLikelihood,
"BCELossLogits": BCEWithLogitsLoss,
"FocalLoss": FocalLoss,
"BinaryCrossEntropy": BCELoss}
decoding_functions = {"greedy_decode": greedy_decode, "simple_decode": simple_decode}
pytorch_opt_dict = {"Adam": Adam, "SGD": SGD, "BertAdam": BertAdam}
<|code_end|>
|
flood_forecast/basic/gru_vanilla.py
<|code_start|>import torch
class VanillaGRU(torch.nn.Module):
def __init__(self, n_time_series: int, hidden_dim: int, num_layers: int, n_target: int, dropout: float,
forecast_length=1, use_hidden=False):
"""
Simple GRU to preform deep time series forecasting.
:param n_time_series:
:type n_time_series:
:param hidden_dim:
:type hidden_dim:
"""
super(VanillaGRU, self).__init__()
# Defining the number of layers and the nodes in each layer
self.layer_dim = num_layers
self.hidden_dim = hidden_dim
self.hidden = None
self.use_hidden = use_hidden
self.forecast_length = forecast_length
# GRU layers
self.gru = torch.nn.GRU(
n_time_series, hidden_dim, num_layers, batch_first=True, dropout=dropout
)
# Fully connected layer
self.fc = torch.nn.Linear(hidden_dim, n_target)
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Forward function for GRU
:param x: torch of shape
:type model: torch.Tensor
:return: Returns a tensor of shape (batch_size, forecast_length, n_target) or (batch_size, n_target)
:rtype: torch.Tensor
"""
# Initializing hidden state for first input with zeros
if self.hidden is not None and self.use_hidden:
h0 = self.hidden
else:
h0 = torch.zeros(self.layer_dim, x.size(0), self.hidden_dim).requires_grad_()
# Forward propagation by passing in the input and hidden state into the model
out, self.hidden = self.gru(x, h0.detach())
# Reshaping the outputs in the shape of (batch_size, seq_length, hidden_size)
# so that it can fit into the fully connected layer
out = out[:, -self.forecast_length, :]
# Convert the final state to our desired output shape (batch_size, output_dim)
out = self.fc(out)
return out
<|code_end|>
flood_forecast/model_dict_function.py
<|code_start|>from flood_forecast.transformer_xl.multi_head_base import MultiAttnHeadSimple
from flood_forecast.transformer_xl.transformer_basic import SimpleTransformer, CustomTransformerDecoder
from flood_forecast.transformer_xl.informer import Informer
from flood_forecast.transformer_xl.transformer_xl import TransformerXL
from flood_forecast.transformer_xl.dummy_torch import DummyTorchModel
from flood_forecast.basic.linear_regression import SimpleLinearModel
from flood_forecast.basic.lstm_vanilla import LSTMForecast
from flood_forecast.custom.custom_opt import BertAdam
from torch.optim import Adam, SGD
from torch.nn import MSELoss, SmoothL1Loss, PoissonNLLLoss, L1Loss, CrossEntropyLoss, BCELoss, BCEWithLogitsLoss
from flood_forecast.basic.linear_regression import simple_decode
from flood_forecast.transformer_xl.transformer_basic import greedy_decode
from flood_forecast.custom.focal_loss import FocalLoss
from flood_forecast.da_rnn.model import DARNN
from flood_forecast.custom.custom_opt import (RMSELoss, MAPELoss, PenalizedMSELoss, NegativeLogLikelihood, MASELoss,
GaussianLoss)
from flood_forecast.transformer_xl.transformer_bottleneck import DecoderTransformer
from flood_forecast.custom.dilate_loss import DilateLoss
from flood_forecast.meta_models.basic_ae import AE
from flood_forecast.transformer_xl.dsanet import DSANet
from flood_forecast.basic.gru_vanilla import VanillaGRU
"""
Utility dictionaries to map a string to a class
"""
pytorch_model_dict = {
"MultiAttnHeadSimple": MultiAttnHeadSimple,
"SimpleTransformer": SimpleTransformer,
"TransformerXL": TransformerXL,
"DummyTorchModel": DummyTorchModel,
"LSTM": LSTMForecast,
"SimpleLinearModel": SimpleLinearModel,
"CustomTransformerDecoder": CustomTransformerDecoder,
"DARNN": DARNN,
"DecoderTransformer": DecoderTransformer,
"BasicAE": AE,
"Informer": Informer,
"DSANet": DSANet,
"VanillaGRU": VanillaGRU
}
pytorch_criterion_dict = {
"GaussianLoss": GaussianLoss,
"MASELoss": MASELoss,
"MSE": MSELoss,
"SmoothL1Loss": SmoothL1Loss,
"PoissonNLLLoss": PoissonNLLLoss,
"RMSE": RMSELoss,
"MAPE": MAPELoss,
"DilateLoss": DilateLoss,
"L1": L1Loss,
"PenalizedMSELoss": PenalizedMSELoss,
"CrossEntropyLoss": CrossEntropyLoss,
"NegativeLogLikelihood": NegativeLogLikelihood,
"BCELossLogits": BCEWithLogitsLoss,
"FocalLoss": FocalLoss,
"BinaryCrossEntropy": BCELoss}
decoding_functions = {"greedy_decode": greedy_decode, "simple_decode": simple_decode}
pytorch_opt_dict = {"Adam": Adam, "SGD": SGD, "BertAdam": BertAdam}
<|code_end|>
|
geodisplay.py projection not working with any other options but the default
When using GeographicPlotDisplay with any cartopy projection other than the default, the map extent changes or no data appears at all.
The default PlateCarree projection works fine, but with a Mercator projection the data shows up while the map extent is wrong.

When using a LambertConformal projection the extent appears to be off, since none of the features that were supposed to be plotted show up.

|
act/plotting/GeoDisplay.py
<|code_start|>"""
act.plotting.GeoDisplay
-----------------------
Stores the class for GeographicPlotDisplay.
"""
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from .plot import Display
try:
import cartopy.crs as ccrs
from cartopy.io.img_tiles import Stamen
import cartopy.feature as cfeature
CARTOPY_AVAILABLE = True
except ImportError:
CARTOPY_AVAILABLE = False
class GeographicPlotDisplay(Display):
"""
A class for making geographic tracer plot of aircraft, ship or other moving
platform plot..
This is inherited from the :func:`act.plotting.Display`
class and has therefore has the same attributes as that class.
See :func:`act.plotting.Display`
for more information. There are no additional attributes or parameters
to this class.
In order to create geographic plots, ACT needs the Cartopy package to be
installed on your system. More information about
Cartopy go here:https://scitools.org.uk/cartopy/docs/latest/ .
"""
def __init__(self, obj, ds_name=None, **kwargs):
if not CARTOPY_AVAILABLE:
raise ImportError("Cartopy needs to be installed on your "
"system to make geographic display plots.")
super().__init__(obj, ds_name, **kwargs)
if self.fig is None:
self.fig = plt.figure(**kwargs)
def geoplot(self, data_field=None, lat_field='lat',
lon_field='lon', dsname=None, cbar_label=None, title=None,
projection=ccrs.PlateCarree(), plot_buffer=0.08,
stamen='terrain-background', tile=8, cartopy_feature=None,
cmap='rainbow', text=None, gridlines=True, **kwargs):
"""
Creates a latttude and longitude plot of a time series data set with
data values indicated by color and described with a colorbar.
Latitude values must be in degree north (-90 to 90) and
longitude must be in degree east (-180 to 180).
Parameters
----------
data_field : str
Name of data filed in object to plot.
lat_field : str
Name of latitude field in object to use.
lon_field : str
Name of longitude field in object to use.
dsname : str or None
The name of the datastream to plot. Set to None to make ACT
attempt to automatically determine this.
cbar_label : str
Label to use with colorbar. If set to None will attempt
to create label from long_name and units.
title : str
Plot title.
projection : str
Project to use on plot.
plot_buffer : float
Buffer to add around data on plot in lat and lon dimension.
stamen : str
Dataset to use for background image. Set to None to not use
background image.
tile : int
Tile zoom to use with background image. Higer number indicates
more resolution. A value of 8 is typical for a normal sonde plot.
cartopy_feature : list of str or str
Cartopy feature to add to plot.
cmap : str
Color map to use for colorbar.
text : dictionary
Dictionary of {text:[lon,lat]} to add to plot. Can have more
than one set of text to add.
gridlines : boolean
Use latitude and longitude gridlines.
**kwargs : keyword arguments
Any other keyword arguments that will be passed
into :func:`matplotlib.pyplot.scatter` when the figure
is made. See the matplotlib documentation for further details
on what keyword arguments are available.
"""
# Get current plotting figure
# del self.axes
# if self.fig is None:
# self.fig = plt.figure()
if dsname is None and len(self._arm.keys()) > 1:
raise ValueError(("You must choose a datastream when there are 2 "
"or more datasets in the GeographicPlotDisplay "
"object."))
elif dsname is None:
dsname = list(self._arm.keys())[0]
if data_field is None:
raise ValueError(("You must enter the name of the data "
"to be plotted."))
# Extract data from object
try:
lat = self._arm[dsname][lat_field].values
except KeyError:
raise ValueError(("You will need to provide the name of the "
"field if not '{}' to use for latitued "
"data.").format(lat_field))
try:
lon = self._arm[dsname][lon_field].values
except KeyError:
raise ValueError(("You will need to provide the name of the "
"field if not '{}' to use for longitude "
"data.").format(lon_field))
# Set up metadata information for display on plot
if cbar_label is None:
try:
cbar_label = (
self._arm[dsname][data_field].attrs['long_name'] +
' (' + self._arm[dsname][data_field].attrs['units'] + ')')
except KeyError:
cbar_label = data_field
lat_limits = [np.nanmin(lat), np.nanmax(lat)]
lon_limits = [np.nanmin(lon), np.nanmax(lon)]
box_size = np.max([np.abs(np.diff(lat_limits)),
np.abs(np.diff(lon_limits))])
bx_buf = box_size * plot_buffer
lat_center = np.sum(lat_limits) / 2.
lon_center = np.sum(lon_limits) / 2.
lat_limits = [lat_center - box_size / 2. - bx_buf,
lat_center + box_size / 2. + bx_buf]
lon_limits = [lon_center - box_size / 2. - bx_buf,
lon_center + box_size / 2. + bx_buf]
data = self._arm[dsname][data_field].values
# Create base plot projection
ax = plt.axes(projection=projection)
plt.subplots_adjust(left=0.01, right=0.99, bottom=0.05, top=0.93)
ax.set_extent([lon_limits[0], lon_limits[1], lat_limits[0],
lat_limits[1]], crs=projection)
if title is None:
try:
dim = list(self._arm[dsname][data_field].dims)
ts = pd.to_datetime(str(self._arm[dsname][dim[0]].values[0]))
date = ts.strftime('%Y-%m-%d')
time_str = ts.strftime('%H:%M:%S')
plt.title(' '.join([dsname, 'at', date, time_str]))
except NameError:
plt.title(dsname)
else:
plt.title(title)
if stamen:
tiler = Stamen(stamen)
ax.add_image(tiler, tile)
colorbar_map = None
if cmap is not None:
colorbar_map = plt.cm.get_cmap(cmap)
sc = ax.scatter(lon, lat, c=data, cmap=colorbar_map, **kwargs)
cbar = plt.colorbar(sc)
cbar.ax.set_ylabel(cbar_label)
if cartopy_feature is not None:
if isinstance(cartopy_feature, str):
cartopy_feature = [cartopy_feature]
cartopy_feature = [ii.upper() for ii in cartopy_feature]
if 'STATES' in cartopy_feature:
ax.add_feature(cfeature.STATES.with_scale('10m'))
if 'LAND' in cartopy_feature:
ax.add_feature(cfeature.LAND)
if 'OCEAN' in cartopy_feature:
ax.add_feature(cfeature.OCEAN)
if 'COASTLINE' in cartopy_feature:
ax.add_feature(cfeature.COASTLINE)
if 'BORDERS' in cartopy_feature:
ax.add_feature(cfeature.BORDERS, linestyle=':')
if 'LAKES' in cartopy_feature:
ax.add_feature(cfeature.LAKES, alpha=0.5)
if 'RIVERS' in cartopy_feature:
ax.add_feature(cfeature.RIVERS)
if text is not None:
for label, location in text.items():
ax.plot(location[0], location[1], marker='*', color='black')
ax.text(location[0], location[1], label, color='black')
if gridlines:
gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True,
linewidth=1, color='gray', alpha=0.5,
linestyle='--')
gl.xlabels_top = False
gl.ylabels_left = True
gl.xlabels_bottom = True
gl.ylabels_right = False
gl.xlabel_style = {'size': 6, 'color': 'gray'}
gl.ylabel_style = {'size': 6, 'color': 'gray'}
return ax
<|code_end|>
|
act/plotting/GeoDisplay.py
<|code_start|>"""
act.plotting.GeoDisplay
-----------------------
Stores the class for GeographicPlotDisplay.
"""
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from .plot import Display
try:
import cartopy.crs as ccrs
from cartopy.io.img_tiles import Stamen
import cartopy.feature as cfeature
CARTOPY_AVAILABLE = True
except ImportError:
CARTOPY_AVAILABLE = False
class GeographicPlotDisplay(Display):
"""
A class for making geographic tracer plot of aircraft, ship or other moving
platform plot..
This is inherited from the :func:`act.plotting.Display`
class and has therefore has the same attributes as that class.
See :func:`act.plotting.Display`
for more information. There are no additional attributes or parameters
to this class.
In order to create geographic plots, ACT needs the Cartopy package to be
installed on your system. More information about
Cartopy go here:https://scitools.org.uk/cartopy/docs/latest/ .
"""
def __init__(self, obj, ds_name=None, **kwargs):
if not CARTOPY_AVAILABLE:
raise ImportError("Cartopy needs to be installed on your "
"system to make geographic display plots.")
super().__init__(obj, ds_name, **kwargs)
if self.fig is None:
self.fig = plt.figure(**kwargs)
def geoplot(self, data_field=None, lat_field='lat',
lon_field='lon', dsname=None, cbar_label=None, title=None,
projection=ccrs.PlateCarree(), plot_buffer=0.08,
stamen='terrain-background', tile=8, cartopy_feature=None,
cmap='rainbow', text=None, gridlines=True, **kwargs):
"""
Creates a latttude and longitude plot of a time series data set with
data values indicated by color and described with a colorbar.
Latitude values must be in degree north (-90 to 90) and
longitude must be in degree east (-180 to 180).
Parameters
----------
data_field : str
Name of data filed in object to plot.
lat_field : str
Name of latitude field in object to use.
lon_field : str
Name of longitude field in object to use.
dsname : str or None
The name of the datastream to plot. Set to None to make ACT
attempt to automatically determine this.
cbar_label : str
Label to use with colorbar. If set to None will attempt
to create label from long_name and units.
title : str
Plot title.
projection : cartopy.crs object
Project to use on plot. See
https://scitools.org.uk/cartopy/docs/latest/crs/projections.html
plot_buffer : float
Buffer to add around data on plot in lat and lon dimension.
stamen : str
Dataset to use for background image. Set to None to not use
background image.
tile : int
Tile zoom to use with background image. Higer number indicates
more resolution. A value of 8 is typical for a normal sonde plot.
cartopy_feature : list of str or str
Cartopy feature to add to plot.
cmap : str
Color map to use for colorbar.
text : dictionary
Dictionary of {text:[lon,lat]} to add to plot. Can have more
than one set of text to add.
gridlines : boolean
Use latitude and longitude gridlines.
**kwargs : keyword arguments
Any other keyword arguments that will be passed
into :func:`matplotlib.pyplot.scatter` when the figure
is made. See the matplotlib documentation for further details
on what keyword arguments are available.
"""
# Get current plotting figure
# del self.axes
# if self.fig is None:
# self.fig = plt.figure()
if dsname is None and len(self._arm.keys()) > 1:
raise ValueError(("You must choose a datastream when there are 2 "
"or more datasets in the GeographicPlotDisplay "
"object."))
elif dsname is None:
dsname = list(self._arm.keys())[0]
if data_field is None:
raise ValueError(("You must enter the name of the data "
"to be plotted."))
# Extract data from object
try:
lat = self._arm[dsname][lat_field].values
except KeyError:
raise ValueError(("You will need to provide the name of the "
"field if not '{}' to use for latitued "
"data.").format(lat_field))
try:
lon = self._arm[dsname][lon_field].values
except KeyError:
raise ValueError(("You will need to provide the name of the "
"field if not '{}' to use for longitude "
"data.").format(lon_field))
# Set up metadata information for display on plot
if cbar_label is None:
try:
cbar_label = (
self._arm[dsname][data_field].attrs['long_name'] +
' (' + self._arm[dsname][data_field].attrs['units'] + ')')
except KeyError:
cbar_label = data_field
lat_limits = [np.nanmin(lat), np.nanmax(lat)]
lon_limits = [np.nanmin(lon), np.nanmax(lon)]
box_size = np.max([np.abs(np.diff(lat_limits)),
np.abs(np.diff(lon_limits))])
bx_buf = box_size * plot_buffer
lat_center = np.sum(lat_limits) / 2.
lon_center = np.sum(lon_limits) / 2.
lat_limits = [lat_center - box_size / 2. - bx_buf,
lat_center + box_size / 2. + bx_buf]
lon_limits = [lon_center - box_size / 2. - bx_buf,
lon_center + box_size / 2. + bx_buf]
data = self._arm[dsname][data_field].values
# Create base plot projection
ax = plt.axes(projection=projection)
plt.subplots_adjust(left=0.01, right=0.99, bottom=0.05, top=0.93)
ax.set_extent([lon_limits[0], lon_limits[1], lat_limits[0],
lat_limits[1]], crs=projection)
if title is None:
try:
dim = list(self._arm[dsname][data_field].dims)
ts = pd.to_datetime(str(self._arm[dsname][dim[0]].values[0]))
date = ts.strftime('%Y-%m-%d')
time_str = ts.strftime('%H:%M:%S')
plt.title(' '.join([dsname, 'at', date, time_str]))
except NameError:
plt.title(dsname)
else:
plt.title(title)
if stamen:
tiler = Stamen(stamen)
ax.add_image(tiler, tile)
colorbar_map = None
if cmap is not None:
colorbar_map = plt.cm.get_cmap(cmap)
sc = ax.scatter(lon, lat, c=data, cmap=colorbar_map, **kwargs)
cbar = plt.colorbar(sc)
cbar.ax.set_ylabel(cbar_label)
if cartopy_feature is not None:
if isinstance(cartopy_feature, str):
cartopy_feature = [cartopy_feature]
cartopy_feature = [ii.upper() for ii in cartopy_feature]
if 'STATES' in cartopy_feature:
ax.add_feature(cfeature.STATES.with_scale('10m'))
if 'LAND' in cartopy_feature:
ax.add_feature(cfeature.LAND)
if 'OCEAN' in cartopy_feature:
ax.add_feature(cfeature.OCEAN)
if 'COASTLINE' in cartopy_feature:
ax.add_feature(cfeature.COASTLINE)
if 'BORDERS' in cartopy_feature:
ax.add_feature(cfeature.BORDERS, linestyle=':')
if 'LAKES' in cartopy_feature:
ax.add_feature(cfeature.LAKES, alpha=0.5)
if 'RIVERS' in cartopy_feature:
ax.add_feature(cfeature.RIVERS)
if text is not None:
for label, location in text.items():
ax.plot(location[0], location[1], marker='*', color='black')
ax.text(location[0], location[1], label, color='black')
if gridlines:
if projection == ccrs.PlateCarree() or projection == ccrs.Mercator:
gl = ax.gridlines(crs=projection, draw_labels=True,
linewidth=1, color='gray', alpha=0.5,
linestyle='--')
gl.xlabels_top = False
gl.ylabels_left = True
gl.xlabels_bottom = True
gl.ylabels_right = False
gl.xlabel_style = {'size': 6, 'color': 'gray'}
gl.ylabel_style = {'size': 6, 'color': 'gray'}
else:
# Labels are only currently supported for PlateCarree and Mercator
gl = ax.gridlines(draw_labels=False, linewidth=1, color='gray',
alpha=0.5, linestyle='--')
return ax
<|code_end|>
|
Deprecation Warnings
Deprecation warnings from the latest build that we need to look into
`act/qc/clean.py:43
/home/travis/build/ARM-DOE/ACT/act/qc/clean.py:43: DeprecationWarning: invalid escape sequence \_
"""
act/qc/clean.py:52
/home/travis/build/ARM-DOE/ACT/act/qc/clean.py:52: DeprecationWarning: invalid escape sequence \(
"a value of 0 \(no bits set\) indicates the data has not "
act/tests/test_io.py:36
/home/travis/build/ARM-DOE/ACT/act/tests/test_io.py:36: DeprecationWarning: invalid escape sequence \s
act.tests.EXAMPLE_ANL_CSV, sep='\s+', column_names=headers)
act/tests/test_CropScape.py::test_cropType
/home/travis/miniconda3/envs/testenv/lib/python3.7/site-packages/pyproj/crs/crs.py:280: FutureWarning: '+init=<authority>:<code>' syntax is deprecated. '<authority>:<code>' is the preferred initialization method. When making the change, be mindful of axis order changes: https://pyproj4.github.io/pyproj/stable/gotchas.html#axis-order-changes-in-proj-6
projstring = _prepare_from_string(projparams)
act/tests/test_CropScape.py::test_cropType
/home/travis/build/ARM-DOE/ACT/act/discovery/get_CropScape.py:80: DeprecationWarning: This function is deprecated. See: https://pyproj4.github.io/pyproj/stable/gotchas.html#upgrading-to-pyproj-2-from-pyproj-1
x, y = pyproj.transform(inproj, outproj, lon, lat)`
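For reference, the `invalid escape sequence` warnings come from backslashes inside ordinary (non-raw) string literals and docstrings; a small illustration of the pattern and the usual fixes:

```python
# Python warns on unrecognized backslash escapes ('\(' and '\)' here)
# in regular string literals.
bad = "a value of 0 \(no bits set\) indicates ..."

# Usual fixes: a raw string (typical for regex patterns), doubled
# backslashes, or dropping the backslash when it is not needed at all.
fixed_raw = r"a value of 0 \(no bits set\) indicates ..."
fixed_plain = "a value of 0 (no bits set) indicates ..."
```

The pyproj warnings point at the deprecated `pyproj.transform` and `+init=<authority>:<code>` usage, which pyproj 2+ replaces with `Transformer.from_crs` and plain `"EPSG:xxxx"` strings.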
|
act/qc/clean.py
<|code_start|>"""
act.qc.clean
------------------------------
Class definitions for cleaning up QC variables to standard
cf-compliance
"""
import xarray as xr
import re
import numpy as np
import copy
@xr.register_dataset_accessor('clean')
class CleanDataset(object):
"""
Class for cleaning up QC variables to standard cf-compliance
"""
def __init__(self, xarray_obj):
self._obj = xarray_obj
@property
def matched_qc_variables(self, check_arm_syntax=True):
"""
Find variables that are QC variables and return list of names.
Parameters
----------
check_arm_syntax : boolean
ARM ueses a standard of starting all quality control variables
with "qc\_". This is a more robust method of getting the quality
control variables before the standard_name attribute is added.
If this is true will first check using attributes and will then
check if variable starts with "qc\_".
Returns
-------
variables : list of str
A list of strings containing the name of each variable.
"""
variables = []
# Will need to find all historical cases and add to list
qc_dict = {'description':
["See global attributes for individual.+bit descriptions.",
("This field contains bit packed integer values, where each "
"bit represents a QC test on the data. Non-zero bits indicate "
"the QC condition given in the description for those bits; "
"a value of 0 \(no bits set\) indicates the data has not "
"failed any QC tests."),
(r"This field contains bit packed values which should be "
r"interpreted as listed..+")
]
}
# Loop over each variable and look for a match to an attribute that
# would exist if the variable is a QC variable
for var in self._obj.data_vars:
attributes = self._obj[var].attrs
for att_name in attributes:
if att_name in qc_dict.keys():
for value in qc_dict[att_name]:
if re.match(value, attributes[att_name]) is not None:
variables.append(var)
break
# Check the start of the variable name. If it begins with qc_ assume quality
# control variable from ARM.
if check_arm_syntax:
variables_qc = [var for var in self._obj.data_vars if var.startswith('qc_')]
variables = variables + variables_qc
variables = list(set(variables))
return variables
def cleanup(self, cleanup_arm_qc=True, clean_arm_state_vars=None,
handle_missing_value=True, link_qc_variables=True,
normalize_assessment=False,
**kwargs):
"""
Wrapper method to automatically call all the standard methods
for obj cleanup.
Parameters
----------
cleanup_arm_qc : bool
Option to clean xarray object from ARM QC to CF QC standards.
Default is True.
clean_arm_state_vars : list of str
Option to clean xarray object state variables from ARM to CF
standards. Pass in list of variable names.
handle_missing_value : bool
Go through variables and look for cases where a QC or state varible
was convereted to a float and missing values set to np.nan. This
is done because of xarry's default to use mask_and_scale=True.
This will convert the data type back to integer and replace
any instances of np.nan to a missing value indicator (most
likely -9999).
link_qc_variables : bool
Option to link QC variablers through ancillary_variables if not
already set.
normalize_assessment : bool
Option to clean up assessments to use the same terminology. Set to
False for default because should only be an issue after adding DQRs
and the function to add DQRs calls this method.
**kwargs : keywords
Keyword arguments passed through to clean.clean_arm_qc
method.
"""
# Convert ARM QC to be more like CF state fields
if cleanup_arm_qc:
self._obj.clean.clean_arm_qc(**kwargs)
# Convert ARM state fields to be more liek CF state fields
if clean_arm_state_vars is not None:
self._obj.clean.clean_arm_state_variables(clean_arm_state_vars)
# Correctly convert data type because of missing value
# indicators in state and QC variables. Needs to be run after
# clean.clean_arm_qc to use CF attribute names.
if handle_missing_value:
self._obj.clean.handle_missing_values()
# Add some ancillary_variables linkages
# between data variable and QC variable
if link_qc_variables:
self._obj.clean.link_variables()
# Update the terminology used with flag_assessments to be consistent
if normalize_assessment:
self._obj.clean.normalize_assessment()
def handle_missing_values(self, default_missing_value=np.int32(-9999)):
"""
Correctly handle missing_value and _FillValue in object.
xarray will automatically replace missing_value and
_FillValue in the data with NaN. This is great for data set
as type float but not great for int data. Can cause issues
with QC and state fields. This will loop through the array
looking for state and QC fields and revert them back to int
data type if upconverted to float to handle NaNs. Issue is that
xarray will convert data type to float if the attribute is defined
even if no data are set as missing value. xarray will also then
remove the missing_value or _FillValue variable attribute. This
will put the missing_value attribute back if needed.
Parameters
----------
default_missing_value : numpy int or float
The default missing value to use if a missing_value attribute
is not defined but one is needed.
"""
state_att_names = ['flag_values', 'flag_meanings',
'flag_masks', 'flag_attributes']
# Look for variables that have 2 of the state_att_names defined
# as attribures and is of type float. If so assume the variable
# was incorreclty converted to float type.
for var in self._obj.data_vars:
var_att_names = self._obj[var].attrs.keys()
if (len(set(state_att_names) & set(var_att_names)) >= 2 and
self._obj[var].values.dtype in
[np.dtype('float16'), np.dtype('float32'),
np.dtype('float64')]):
# Look at units variable to see if this is the stupid way some
# ARM products mix data and state variables. If the units are not
# in the normal list of unitless type assume this is a data variable
# and skip. Other option is to lookf or a valid_range attribute
# and skip. This is commented out for now since the units check
# appears to be working.
try:
if self._obj[var].attrs['units'] not in ['1', 'unitless', '', ' ']:
continue
# self._obj[var].attrs['valid_range']
# continue
except KeyError:
pass
# Change any np.nan values to missing value indicator
data = self._obj[var].values
data[np.isnan(data)] = default_missing_value.astype(data.dtype)
# Convert data to match type of flag_mask or flag_values
# as the best guess of what type is correct.
found_dtype = False
for att_name in ['flag_masks', 'flag_values']:
try:
att_value = self._obj[var].attrs[att_name]
if isinstance(att_value, (list, tuple)):
dtype = att_value[0].dtype
else:
dtype = att_value.dtype
data = data.astype(dtype)
found_dtype = True
break
except (KeyError, IndexError):
pass
# If flag_mask or flag_values is not available choose an int type
# and set data to that type.
if found_dtype is False:
data = data.astype(default_missing_value.dtype)
# Return data to object and add missing value indicator
# attribute to variable.
self._obj[var].values = data
self._obj[var].attrs['missing_value'] = \
default_missing_value.astype(data.dtype)
def get_attr_info(self, variable=None, flag=False):
"""
Get ARM quality control definitions from the ARM standard
bit_#_description, ... attributes and return as dictionary.
Will attempt to guess if the flag is integer or bit packed
based on what attributes are set.
Parameters
----------
variable : str
Variable name to get attribute information. If set to None
will get global attributes.
flag : bool
Optional flag indicating if QC is expected to be bitpacked
or integer. Flag = True indicates integer QC. Default
is bitpacked or False.
Returns
-------
attributes dictionary : dict or None
A dictionary contianing the attribute information converted from
ARM QC to CF QC. All keys include 'flag_meanings', 'flag_masks',
'flag_values', 'flag_assessments', 'flag_tests', 'arm_attributes'.
Returns None if none found.
"""
string = 'bit'
if flag:
string = 'flag'
else:
found_string = False
try:
if self._obj.attrs['qc_bit_comment']:
string = 'bit'
found_string = True
except KeyError:
pass
if found_string is False:
try:
if self._obj.attrs['qc_flag_comment']:
string = 'flag'
found_string = True
except KeyError:
pass
if found_string is False:
var = self.matched_qc_variables
if len(var) > 0:
try:
flag_method = self._obj[var[0]].attrs['flag_method']
string = flag_method
found_string = True
except KeyError:
pass
try:
if variable:
attr_description_pattern = (r"(^" + string +
r")_([0-9]+)_(description$)")
attr_assessment_pattern = (r"(^" + string +
r")_([0-9]+)_(assessment$)")
attr_comment_pattern = (r"(^" + string +
r")_([0-9]+)_(comment$)")
attributes = self._obj[variable].attrs
else:
attr_description_pattern = (r"(^qc_" + string +
r")_([0-9]+)_(description$)")
attr_assessment_pattern = (r"(^qc_" + string +
r")_([0-9]+)_(assessment$)")
attr_comment_pattern = (r"(^qc_" + string +
r")_([0-9]+)_(comment$)")
attributes = self._obj.attrs
except KeyError:
return None
assessment_bit_num = []
description_bit_num = []
comment_bit_num = []
flag_masks = []
flag_meanings = []
flag_assessments = []
flag_comments = []
arm_attributes = []
dtype = np.int32
for att_name in attributes:
try:
description = re.match(attr_description_pattern, att_name)
description_bit_num.append(int(description.groups()[1]))
flag_meanings.append(attributes[att_name])
arm_attributes.append(att_name)
except AttributeError:
pass
try:
assessment = re.match(attr_assessment_pattern, att_name)
assessment_bit_num.append(int(assessment.groups()[1]))
flag_assessments.append(attributes[att_name])
arm_attributes.append(att_name)
except AttributeError:
pass
try:
comment = re.match(attr_comment_pattern, att_name)
comment_bit_num.append(int(comment.groups()[1]))
flag_comments.append(attributes[att_name])
arm_attributes.append(att_name)
except AttributeError:
pass
if variable is not None:
# Try and get the data type from the variable if it is an int
try:
if (self._obj[variable].values.dtype in [
np.dtype('int8'), np.dtype('int16'),
np.dtype('int32'), np.dtype('int64')]):
dtype = self._obj[variable].values.dtype
except AttributeError:
pass
# If the data is type float check the largest value and make
# sure the type we set can handle it.
if np.nanmax(self._obj[variable].values) > 2**32 - 1:
dtype = np.int64
# Sort on bit number to ensure correct description order
index = np.argsort(description_bit_num)
flag_meanings = np.array(flag_meanings)
description_bit_num = np.array(description_bit_num)
flag_meanings = flag_meanings[index]
description_bit_num = description_bit_num[index]
# Sort on bit number to ensure correct assessment order
if len(flag_assessments) > 0:
if len(flag_assessments) < len(flag_meanings):
for ii in range(1, len(flag_meanings) + 1):
if ii not in assessment_bit_num:
assessment_bit_num.append(ii)
flag_assessments.append('')
index = np.argsort(assessment_bit_num)
flag_assessments = np.array(flag_assessments)
flag_assessments = flag_assessments[index]
# Sort on bit number to ensure correct comment order
if len(flag_comments) > 0:
if len(flag_comments) < len(flag_meanings):
for ii in range(1, len(flag_meanings) + 1):
if ii not in comment_bit_num:
comment_bit_num.append(ii)
flag_comments.append('')
index = np.argsort(comment_bit_num)
flag_comments = np.array(flag_comments)
flag_comments = flag_comments[index]
# Convert bit number to mask number
if len(description_bit_num) > 0:
flag_masks = np.array(description_bit_num)
flag_masks = np.left_shift(1, flag_masks - 1)
# build dictionary to return values
if len(flag_masks) > 0 or len(description_bit_num) > 0:
return_dict = dict()
return_dict['flag_meanings'] = list(np.array(flag_meanings,
dtype=np.str))
if len(flag_masks) > 0 and max(flag_masks) > 2**32 - 1:
flag_mask_dtype = np.int64
else:
flag_mask_dtype = dtype
if flag:
return_dict['flag_values'] = list(np.array(description_bit_num,
dtype=dtype))
return_dict['flag_masks'] = list(np.array([],
dtype=flag_mask_dtype))
else:
return_dict['flag_values'] = list(np.array([],
dtype=dtype))
return_dict['flag_masks'] = list(np.array(flag_masks,
dtype=flag_mask_dtype))
return_dict['flag_assessments'] = list(np.array(flag_assessments,
dtype=np.str))
return_dict['flag_tests'] = list(np.array(description_bit_num,
dtype=dtype))
return_dict['flag_comments'] = list(np.array(flag_comments,
dtype=np.str))
return_dict['arm_attributes'] = arm_attributes
else:
# If nothing to return set to None
return_dict = None
return return_dict
def clean_arm_state_variables(self,
variables,
override_cf_flag=True,
clean_units_string=True,
integer_flag=True):
"""
Function to clean up state variables to use more CF style.
Parameters
----------
variables : str or list of str
List of variable names to update.
override_cf_flag : bool
Option to overwrite CF flag_meanings attribute if it exists
with the values from ARM QC bit_#_description.
clean_units_string : bool
Option to update units string if set to 'unitless' to be
udunits compliant '1'.
integer_flag : bool
Pass through keyword of 'flag' for get_attr_info().
"""
if isinstance(variables, str):
variables = [variables]
for var in variables:
flag_info = self.get_attr_info(variable=var, flag=integer_flag)
if flag_info is None:
return
# Add new attributes to variable
for attr in ['flag_values', 'flag_meanings', 'flag_masks']:
if len(flag_info[attr]) > 0:
# Only add if attribute does not exist.
if attr in self._obj[var].attrs.keys() is False:
self._obj[var].attrs[attr] = copy.copy(flag_info[attr])
# If flag is set set attribure even if exists
elif override_cf_flag:
self._obj[var].attrs[attr] = copy.copy(flag_info[attr])
# Remove replaced attributes
arm_attributes = flag_info['arm_attributes']
for attr in arm_attributes:
try:
del self._obj[var].attrs[attr]
except KeyError:
pass
# Clean up units attribute from unitless to udunits '1'
if (clean_units_string and
self._obj[var].attrs['units'] == 'unitless'):
self._obj[var].attrs['units'] = '1'
def correct_valid_minmax(self, qc_variable):
"""
Function to correct the name and location of quality control limit
variables that use valid_min and valid_max incorrectly.
Parameters
----------
qc_variable : str
Name of quality control variable in xarray object to correct.
"""
test_dict = {'valid_min': 'fail_min',
'valid_max': 'fail_max',
'valid_delta': 'fail_delta'}
aa = re.match(r"^qc_(.+)", qc_variable)
variable = None
try:
variable = aa.groups()[0]
except AttributeError:
return
made_change = False
try:
flag_meanings = copy.copy(
self._obj[qc_variable].attrs['flag_meanings'])
except KeyError:
return
for attr in test_dict.keys():
for ii, test in enumerate(flag_meanings):
if attr in test:
flag_meanings[ii] = re.sub(attr, test_dict[attr], test)
made_change = True
try:
self._obj[qc_variable].attrs[test_dict[attr]] = \
copy.copy(self._obj[variable].attrs[attr])
del self._obj[variable].attrs[attr]
except KeyError:
pass
if made_change:
self._obj[qc_variable].attrs['flag_meanings'] = flag_meanings
def link_variables(self):
"""
Add some attributes to link and explain data
to QC data relationship. Will use non-CF standard_name
of quality_flag. Hopefully this will be added to the
standard_name table in the future.
"""
for var in self._obj.data_vars:
aa = re.match(r"^qc_(.+)", var)
try:
variable = aa.groups()[0]
qc_variable = var
except AttributeError:
continue
# Skip data quality fields.
try:
if not ('Quality check results on field:' in
self._obj[var].attrs['long_name']):
continue
except KeyError:
pass
# Get existing data variable ancillary_variables attribute
try:
ancillary_variables = self._obj[variable].\
attrs['ancillary_variables']
except KeyError:
ancillary_variables = ''
# If the QC variable is not in ancillary_variables add
if qc_variable not in ancillary_variables:
ancillary_variables = qc_variable
self._obj[variable].attrs['ancillary_variables']\
= copy.copy(ancillary_variables)
# Check if QC variable has correct standard_name and iff not fix it.
correct_standard_name = 'quality_flag'
try:
if self._obj[qc_variable].attrs['standard_name'] != correct_standard_name:
self._obj[qc_variable].attrs['standard_name'] = correct_standard_name
except KeyError:
self._obj[qc_variable].attrs['standard_name'] = correct_standard_name
def clean_arm_qc(self,
override_cf_flag=True,
clean_units_string=True,
correct_valid_min_max=True):
"""
Function to clean up xarray object QC variables.
Parameters
----------
override_cf_flag : bool
Option to overwrite CF flag_masks, flag_meanings, flag_values
if exists.
clean_units_string : bool
Option to clean up units string from 'unitless'
to udunits compliant '1'.
correct_valid_min_max : bool
Option to correct use of valid_min and valid_max with QC variables
by moving from data variable to QC varible, renaming to fail_min,
fail_max and fail_detla if the valid_min, valid_max or valid_delta
is listed in bit discription attribute. If not listed as
used with QC will assume is being used correctly.
"""
global_qc = self.get_attr_info()
for qc_var in self.matched_qc_variables:
# Clean up units attribute from unitless to udunits '1'
try:
if (clean_units_string and
self._obj[qc_var].attrs['units'] == 'unitless'):
self._obj[qc_var].attrs['units'] = '1'
except KeyError:
pass
qc_attributes = self.get_attr_info(variable=qc_var)
if qc_attributes is None:
qc_attributes = global_qc
# Add new attributes to variable
for attr in ['flag_masks', 'flag_meanings',
'flag_assessments', 'flag_values', 'flag_comments']:
if qc_attributes is not None and len(qc_attributes[attr]) > 0:
# Only add if attribute does not exists
if attr in self._obj[qc_var].attrs.keys() is False:
self._obj[qc_var].attrs[attr] = copy.copy(qc_attributes[attr])
# If flag is set add attribure even if already exists
elif override_cf_flag:
self._obj[qc_var].attrs[attr] = copy.copy(qc_attributes[attr])
# Remove replaced attributes
if qc_attributes is not None:
arm_attributes = qc_attributes['arm_attributes']
arm_attributes.extend(['description', 'flag_method'])
for attr in arm_attributes:
try:
del self._obj[qc_var].attrs[attr]
except KeyError:
pass
# Check for use of valid_min and valid_max as QC limits and fix
if correct_valid_min_max:
self._obj.clean.correct_valid_minmax(qc_var)
# Clean up global attributes
if global_qc is not None:
global_attributes = global_qc['arm_attributes']
global_attributes.extend(['qc_bit_comment'])
for attr in global_attributes:
try:
del self._obj.attrs[attr]
except KeyError:
pass
def normalize_assessment(self, variables=None, exclude_variables=None,
qc_lookup={"Incorrect": "Bad", "Suspect": "Indeterminate"}):
"""
Method to clean up assessment terms used to be consistent between
embedded QC and DQRs.
Parameters
----------
variables : str or list of str
Optional data variable names to check and normalize. If set to
None will check all variables.
exclude_variables : str or list of str
Optional data variable names to exclude from processing.
qc_lookup : dict
Optional dictionary used to convert between terms.
"""
# Get list of variables if not provided
if variables is None:
variables = list(self._obj.data_vars)
# Ensure variables is a list
if not isinstance(variables, (list, tuple)):
variables = [variables]
# If exclude variables provided remove from variables list
if exclude_variables is not None:
if not isinstance(exclude_variables, (list, tuple)):
exclude_variables = [exclude_variables]
variables = list(set(variables) - set(exclude_variables))
# Loop over variables checking if a QC variable exits and use the
# lookup dictionary to convert the assessment terms.
for var_name in variables:
qc_var_name = self._obj.qcfilter.check_for_ancillary_qc(
var_name, add_if_missing=False, cleanup=False)
if qc_var_name is not None:
try:
flag_assessments = self._obj[qc_var_name].attrs['flag_assessments']
except KeyError:
continue
for ii, assess in enumerate(flag_assessments):
try:
flag_assessments[ii] = qc_lookup[assess]
except KeyError:
continue
<|code_end|>
|
act/qc/clean.py
<|code_start|>"""
act.qc.clean
------------------------------
Class definitions for cleaning up QC variables to standard
cf-compliance
"""
import xarray as xr
import re
import numpy as np
import copy
@xr.register_dataset_accessor('clean')
class CleanDataset(object):
"""
Class for cleaning up QC variables to standard cf-compliance
"""
def __init__(self, xarray_obj):
self._obj = xarray_obj
@property
def matched_qc_variables(self, check_arm_syntax=True):
"""
Find variables that are QC variables and return list of names.
Parameters
----------
check_arm_syntax : boolean
ARM ueses a standard of starting all quality control variables
with "qc" joined with an underscore. This is a more robust method
of getting the quality control variables before the standard_name
attribute is added. If this is true will first check using
attributes and will then check if variable starts with "qc".
Returns
-------
variables : list of str
A list of strings containing the name of each variable.
"""
variables = []
# Will need to find all historical cases and add to list
qc_dict = {'description':
["See global attributes for individual.+bit descriptions.",
("This field contains bit packed integer values, where each "
"bit represents a QC test on the data. Non-zero bits indicate "
"the QC condition given in the description for those bits; "
"a value of 0.+ indicates the data has not "
"failed any QC tests."),
(r"This field contains bit packed values which should be "
r"interpreted as listed..+")
]
}
# Loop over each variable and look for a match to an attribute that
# would exist if the variable is a QC variable
for var in self._obj.data_vars:
attributes = self._obj[var].attrs
for att_name in attributes:
if att_name in qc_dict.keys():
for value in qc_dict[att_name]:
if re.match(value, attributes[att_name]) is not None:
variables.append(var)
break
# Check the start of the variable name. If it begins with qc_ assume quality
# control variable from ARM.
if check_arm_syntax:
variables_qc = [var for var in self._obj.data_vars if var.startswith('qc_')]
variables = variables + variables_qc
variables = list(set(variables))
return variables
def cleanup(self, cleanup_arm_qc=True, clean_arm_state_vars=None,
handle_missing_value=True, link_qc_variables=True,
normalize_assessment=False,
**kwargs):
"""
Wrapper method to automatically call all the standard methods
for obj cleanup.
Parameters
----------
cleanup_arm_qc : bool
Option to clean xarray object from ARM QC to CF QC standards.
Default is True.
clean_arm_state_vars : list of str
Option to clean xarray object state variables from ARM to CF
standards. Pass in list of variable names.
handle_missing_value : bool
Go through variables and look for cases where a QC or state varible
was convereted to a float and missing values set to np.nan. This
is done because of xarry's default to use mask_and_scale=True.
This will convert the data type back to integer and replace
any instances of np.nan to a missing value indicator (most
likely -9999).
link_qc_variables : bool
Option to link QC variablers through ancillary_variables if not
already set.
normalize_assessment : bool
Option to clean up assessments to use the same terminology. Set to
False for default because should only be an issue after adding DQRs
and the function to add DQRs calls this method.
**kwargs : keywords
Keyword arguments passed through to clean.clean_arm_qc
method.
"""
# Convert ARM QC to be more like CF state fields
if cleanup_arm_qc:
self._obj.clean.clean_arm_qc(**kwargs)
# Convert ARM state fields to be more liek CF state fields
if clean_arm_state_vars is not None:
self._obj.clean.clean_arm_state_variables(clean_arm_state_vars)
# Correctly convert data type because of missing value
# indicators in state and QC variables. Needs to be run after
# clean.clean_arm_qc to use CF attribute names.
if handle_missing_value:
self._obj.clean.handle_missing_values()
# Add some ancillary_variables linkages
# between data variable and QC variable
if link_qc_variables:
self._obj.clean.link_variables()
# Update the terminology used with flag_assessments to be consistent
if normalize_assessment:
self._obj.clean.normalize_assessment()
def handle_missing_values(self, default_missing_value=np.int32(-9999)):
"""
Correctly handle missing_value and _FillValue in object.
xarray will automatically replace missing_value and
_FillValue in the data with NaN. This is great for data set
as type float but not great for int data. Can cause issues
with QC and state fields. This will loop through the array
looking for state and QC fields and revert them back to int
data type if upconverted to float to handle NaNs. Issue is that
xarray will convert data type to float if the attribute is defined
even if no data are set as missing value. xarray will also then
remove the missing_value or _FillValue variable attribute. This
will put the missing_value attribute back if needed.
Parameters
----------
default_missing_value : numpy int or float
The default missing value to use if a missing_value attribute
is not defined but one is needed.
"""
state_att_names = ['flag_values', 'flag_meanings',
'flag_masks', 'flag_attributes']
# Look for variables that have 2 of the state_att_names defined
        # as attributes and are of type float. If so, assume the variable
        # was incorrectly converted to float type.
for var in self._obj.data_vars:
var_att_names = self._obj[var].attrs.keys()
if (len(set(state_att_names) & set(var_att_names)) >= 2 and
self._obj[var].values.dtype in
[np.dtype('float16'), np.dtype('float32'),
np.dtype('float64')]):
# Look at units variable to see if this is the stupid way some
# ARM products mix data and state variables. If the units are not
# in the normal list of unitless type assume this is a data variable
                # and skip. Other option is to look for a valid_range attribute
# and skip. This is commented out for now since the units check
# appears to be working.
try:
if self._obj[var].attrs['units'] not in ['1', 'unitless', '', ' ']:
continue
# self._obj[var].attrs['valid_range']
# continue
except KeyError:
pass
# Change any np.nan values to missing value indicator
data = self._obj[var].values
data[np.isnan(data)] = default_missing_value.astype(data.dtype)
# Convert data to match type of flag_mask or flag_values
# as the best guess of what type is correct.
found_dtype = False
for att_name in ['flag_masks', 'flag_values']:
try:
att_value = self._obj[var].attrs[att_name]
if isinstance(att_value, (list, tuple)):
dtype = att_value[0].dtype
else:
dtype = att_value.dtype
data = data.astype(dtype)
found_dtype = True
break
except (KeyError, IndexError):
pass
# If flag_mask or flag_values is not available choose an int type
# and set data to that type.
if found_dtype is False:
data = data.astype(default_missing_value.dtype)
# Return data to object and add missing value indicator
# attribute to variable.
self._obj[var].values = data
self._obj[var].attrs['missing_value'] = \
default_missing_value.astype(data.dtype)
def get_attr_info(self, variable=None, flag=False):
"""
Get ARM quality control definitions from the ARM standard
bit_#_description, ... attributes and return as dictionary.
Will attempt to guess if the flag is integer or bit packed
based on what attributes are set.
Parameters
----------
variable : str
Variable name to get attribute information. If set to None
will get global attributes.
flag : bool
Optional flag indicating if QC is expected to be bitpacked
or integer. Flag = True indicates integer QC. Default
is bitpacked or False.
Returns
-------
attributes dictionary : dict or None
            A dictionary containing the attribute information converted from
ARM QC to CF QC. All keys include 'flag_meanings', 'flag_masks',
'flag_values', 'flag_assessments', 'flag_tests', 'arm_attributes'.
Returns None if none found.
"""
string = 'bit'
if flag:
string = 'flag'
else:
found_string = False
try:
if self._obj.attrs['qc_bit_comment']:
string = 'bit'
found_string = True
except KeyError:
pass
if found_string is False:
try:
if self._obj.attrs['qc_flag_comment']:
string = 'flag'
found_string = True
except KeyError:
pass
if found_string is False:
var = self.matched_qc_variables
if len(var) > 0:
try:
flag_method = self._obj[var[0]].attrs['flag_method']
string = flag_method
found_string = True
except KeyError:
pass
try:
if variable:
attr_description_pattern = (r"(^" + string +
r")_([0-9]+)_(description$)")
attr_assessment_pattern = (r"(^" + string +
r")_([0-9]+)_(assessment$)")
attr_comment_pattern = (r"(^" + string +
r")_([0-9]+)_(comment$)")
attributes = self._obj[variable].attrs
else:
attr_description_pattern = (r"(^qc_" + string +
r")_([0-9]+)_(description$)")
attr_assessment_pattern = (r"(^qc_" + string +
r")_([0-9]+)_(assessment$)")
attr_comment_pattern = (r"(^qc_" + string +
r")_([0-9]+)_(comment$)")
attributes = self._obj.attrs
except KeyError:
return None
assessment_bit_num = []
description_bit_num = []
comment_bit_num = []
flag_masks = []
flag_meanings = []
flag_assessments = []
flag_comments = []
arm_attributes = []
dtype = np.int32
for att_name in attributes:
try:
description = re.match(attr_description_pattern, att_name)
description_bit_num.append(int(description.groups()[1]))
flag_meanings.append(attributes[att_name])
arm_attributes.append(att_name)
except AttributeError:
pass
try:
assessment = re.match(attr_assessment_pattern, att_name)
assessment_bit_num.append(int(assessment.groups()[1]))
flag_assessments.append(attributes[att_name])
arm_attributes.append(att_name)
except AttributeError:
pass
try:
comment = re.match(attr_comment_pattern, att_name)
comment_bit_num.append(int(comment.groups()[1]))
flag_comments.append(attributes[att_name])
arm_attributes.append(att_name)
except AttributeError:
pass
if variable is not None:
# Try and get the data type from the variable if it is an int
try:
if (self._obj[variable].values.dtype in [
np.dtype('int8'), np.dtype('int16'),
np.dtype('int32'), np.dtype('int64')]):
dtype = self._obj[variable].values.dtype
except AttributeError:
pass
# If the data is type float check the largest value and make
# sure the type we set can handle it.
if np.nanmax(self._obj[variable].values) > 2**32 - 1:
dtype = np.int64
# Sort on bit number to ensure correct description order
index = np.argsort(description_bit_num)
flag_meanings = np.array(flag_meanings)
description_bit_num = np.array(description_bit_num)
flag_meanings = flag_meanings[index]
description_bit_num = description_bit_num[index]
# Sort on bit number to ensure correct assessment order
if len(flag_assessments) > 0:
if len(flag_assessments) < len(flag_meanings):
for ii in range(1, len(flag_meanings) + 1):
if ii not in assessment_bit_num:
assessment_bit_num.append(ii)
flag_assessments.append('')
index = np.argsort(assessment_bit_num)
flag_assessments = np.array(flag_assessments)
flag_assessments = flag_assessments[index]
# Sort on bit number to ensure correct comment order
if len(flag_comments) > 0:
if len(flag_comments) < len(flag_meanings):
for ii in range(1, len(flag_meanings) + 1):
if ii not in comment_bit_num:
comment_bit_num.append(ii)
flag_comments.append('')
index = np.argsort(comment_bit_num)
flag_comments = np.array(flag_comments)
flag_comments = flag_comments[index]
# Convert bit number to mask number
if len(description_bit_num) > 0:
flag_masks = np.array(description_bit_num)
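            # Bit number N corresponds to mask value 2**(N - 1), e.g. bit 3 -> 4.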
flag_masks = np.left_shift(1, flag_masks - 1)
# build dictionary to return values
if len(flag_masks) > 0 or len(description_bit_num) > 0:
return_dict = dict()
return_dict['flag_meanings'] = list(np.array(flag_meanings,
dtype=np.str))
if len(flag_masks) > 0 and max(flag_masks) > 2**32 - 1:
flag_mask_dtype = np.int64
else:
flag_mask_dtype = dtype
if flag:
return_dict['flag_values'] = list(np.array(description_bit_num,
dtype=dtype))
return_dict['flag_masks'] = list(np.array([],
dtype=flag_mask_dtype))
else:
return_dict['flag_values'] = list(np.array([],
dtype=dtype))
return_dict['flag_masks'] = list(np.array(flag_masks,
dtype=flag_mask_dtype))
return_dict['flag_assessments'] = list(np.array(flag_assessments,
dtype=np.str))
return_dict['flag_tests'] = list(np.array(description_bit_num,
dtype=dtype))
return_dict['flag_comments'] = list(np.array(flag_comments,
dtype=np.str))
return_dict['arm_attributes'] = arm_attributes
else:
# If nothing to return set to None
return_dict = None
return return_dict
def clean_arm_state_variables(self,
variables,
override_cf_flag=True,
clean_units_string=True,
integer_flag=True):
"""
Function to clean up state variables to use more CF style.
Parameters
----------
variables : str or list of str
List of variable names to update.
override_cf_flag : bool
Option to overwrite CF flag_meanings attribute if it exists
with the values from ARM QC bit_#_description.
clean_units_string : bool
Option to update units string if set to 'unitless' to be
udunits compliant '1'.
integer_flag : bool
Pass through keyword of 'flag' for get_attr_info().
"""
if isinstance(variables, str):
variables = [variables]
for var in variables:
flag_info = self.get_attr_info(variable=var, flag=integer_flag)
if flag_info is None:
return
# Add new attributes to variable
for attr in ['flag_values', 'flag_meanings', 'flag_masks']:
if len(flag_info[attr]) > 0:
                    # Only add if the attribute does not already exist.
                    if attr not in self._obj[var].attrs.keys():
                        self._obj[var].attrs[attr] = copy.copy(flag_info[attr])
                    # If flag is set, set the attribute even if it exists.
                    elif override_cf_flag:
                        self._obj[var].attrs[attr] = copy.copy(flag_info[attr])
# Remove replaced attributes
arm_attributes = flag_info['arm_attributes']
for attr in arm_attributes:
try:
del self._obj[var].attrs[attr]
except KeyError:
pass
# Clean up units attribute from unitless to udunits '1'
if (clean_units_string and
self._obj[var].attrs['units'] == 'unitless'):
self._obj[var].attrs['units'] = '1'
def correct_valid_minmax(self, qc_variable):
"""
Function to correct the name and location of quality control limit
variables that use valid_min and valid_max incorrectly.
Parameters
----------
qc_variable : str
Name of quality control variable in xarray object to correct.
"""
test_dict = {'valid_min': 'fail_min',
'valid_max': 'fail_max',
'valid_delta': 'fail_delta'}
aa = re.match(r"^qc_(.+)", qc_variable)
variable = None
try:
variable = aa.groups()[0]
except AttributeError:
return
made_change = False
try:
flag_meanings = copy.copy(
self._obj[qc_variable].attrs['flag_meanings'])
except KeyError:
return
for attr in test_dict.keys():
for ii, test in enumerate(flag_meanings):
if attr in test:
flag_meanings[ii] = re.sub(attr, test_dict[attr], test)
made_change = True
try:
self._obj[qc_variable].attrs[test_dict[attr]] = \
copy.copy(self._obj[variable].attrs[attr])
del self._obj[variable].attrs[attr]
except KeyError:
pass
if made_change:
self._obj[qc_variable].attrs['flag_meanings'] = flag_meanings
def link_variables(self):
"""
Add some attributes to link and explain data
to QC data relationship. Will use non-CF standard_name
of quality_flag. Hopefully this will be added to the
standard_name table in the future.
"""
for var in self._obj.data_vars:
aa = re.match(r"^qc_(.+)", var)
try:
variable = aa.groups()[0]
qc_variable = var
except AttributeError:
continue
# Skip data quality fields.
try:
if not ('Quality check results on field:' in
self._obj[var].attrs['long_name']):
continue
except KeyError:
pass
# Get existing data variable ancillary_variables attribute
try:
ancillary_variables = self._obj[variable].\
attrs['ancillary_variables']
except KeyError:
ancillary_variables = ''
# If the QC variable is not in ancillary_variables add
if qc_variable not in ancillary_variables:
ancillary_variables = qc_variable
self._obj[variable].attrs['ancillary_variables']\
= copy.copy(ancillary_variables)
            # Check if QC variable has correct standard_name and if not fix it.
correct_standard_name = 'quality_flag'
try:
if self._obj[qc_variable].attrs['standard_name'] != correct_standard_name:
self._obj[qc_variable].attrs['standard_name'] = correct_standard_name
except KeyError:
self._obj[qc_variable].attrs['standard_name'] = correct_standard_name
def clean_arm_qc(self,
override_cf_flag=True,
clean_units_string=True,
correct_valid_min_max=True):
"""
Function to clean up xarray object QC variables.
Parameters
----------
override_cf_flag : bool
Option to overwrite CF flag_masks, flag_meanings, flag_values
if exists.
clean_units_string : bool
Option to clean up units string from 'unitless'
to udunits compliant '1'.
correct_valid_min_max : bool
Option to correct use of valid_min and valid_max with QC variables
            by moving from the data variable to the QC variable and renaming to
            fail_min, fail_max and fail_delta if the valid_min, valid_max or
            valid_delta is listed in a bit description attribute. If not listed
            as used with QC, assumes it is being used correctly.
"""
global_qc = self.get_attr_info()
for qc_var in self.matched_qc_variables:
# Clean up units attribute from unitless to udunits '1'
try:
if (clean_units_string and
self._obj[qc_var].attrs['units'] == 'unitless'):
self._obj[qc_var].attrs['units'] = '1'
except KeyError:
pass
qc_attributes = self.get_attr_info(variable=qc_var)
if qc_attributes is None:
qc_attributes = global_qc
# Add new attributes to variable
for attr in ['flag_masks', 'flag_meanings',
'flag_assessments', 'flag_values', 'flag_comments']:
if qc_attributes is not None and len(qc_attributes[attr]) > 0:
                    # Only add if the attribute does not already exist.
                    if attr not in self._obj[qc_var].attrs.keys():
                        self._obj[qc_var].attrs[attr] = copy.copy(qc_attributes[attr])
                    # If flag is set, add the attribute even if it already exists.
                    elif override_cf_flag:
                        self._obj[qc_var].attrs[attr] = copy.copy(qc_attributes[attr])
# Remove replaced attributes
if qc_attributes is not None:
arm_attributes = qc_attributes['arm_attributes']
arm_attributes.extend(['description', 'flag_method'])
for attr in arm_attributes:
try:
del self._obj[qc_var].attrs[attr]
except KeyError:
pass
# Check for use of valid_min and valid_max as QC limits and fix
if correct_valid_min_max:
self._obj.clean.correct_valid_minmax(qc_var)
# Clean up global attributes
if global_qc is not None:
global_attributes = global_qc['arm_attributes']
global_attributes.extend(['qc_bit_comment'])
for attr in global_attributes:
try:
del self._obj.attrs[attr]
except KeyError:
pass
def normalize_assessment(self, variables=None, exclude_variables=None,
qc_lookup={"Incorrect": "Bad", "Suspect": "Indeterminate"}):
"""
        Method to clean up assessment terms so they are used consistently between
        embedded QC and DQRs.
Parameters
----------
variables : str or list of str
Optional data variable names to check and normalize. If set to
None will check all variables.
exclude_variables : str or list of str
Optional data variable names to exclude from processing.
qc_lookup : dict
Optional dictionary used to convert between terms.
"""
# Get list of variables if not provided
if variables is None:
variables = list(self._obj.data_vars)
# Ensure variables is a list
if not isinstance(variables, (list, tuple)):
variables = [variables]
# If exclude variables provided remove from variables list
if exclude_variables is not None:
if not isinstance(exclude_variables, (list, tuple)):
exclude_variables = [exclude_variables]
variables = list(set(variables) - set(exclude_variables))
        # Loop over variables checking if a QC variable exists and use the
# lookup dictionary to convert the assessment terms.
for var_name in variables:
qc_var_name = self._obj.qcfilter.check_for_ancillary_qc(
var_name, add_if_missing=False, cleanup=False)
if qc_var_name is not None:
try:
flag_assessments = self._obj[qc_var_name].attrs['flag_assessments']
except KeyError:
continue
for ii, assess in enumerate(flag_assessments):
try:
flag_assessments[ii] = qc_lookup[assess]
except KeyError:
continue
<|code_end|>
|
Attributes on coordinate variable dropped after applying qcfilter.add_test() to data variable
After applying `ds.qcfilter.add_test()` to a data variable, the attributes on the coordinate variable dimensioning the tested variable are removed. This happens for act-atmos versions 1.0.2 and 1.0.3, but not for 0.7.2 and earlier.
Steps to reproduce below:
`act-test.py`:
```
import act
import numpy as np
import xarray as xr
import pandas as pd
print(f"{act.__version__ = }")
print(f"{xr.__version__ = }")
ds = xr.Dataset(
data_vars=dict(dummy_data=(['time'], np.arange(0, 7))),
coords=dict(time=pd.date_range("2021-07-12 00:00:00", periods=7))
)
ds.time.attrs['units'] = "seconds since 1970-01-01T00:00:00"
ds.time.attrs['standard_name'] = "time"
ds.time.attrs['long_name'] = "time (UTC)"
print(f"Initially ds.time.attrs = {ds.time.attrs = }")
ds.qcfilter.add_test(
"dummy_data", index=np.full_like(ds.dummy_data.data, True),
test_number=1,
test_meaning="Example qc test",
test_assessment="Bad")
print(f"After qcfilter {ds.time.attrs = }")
```
Console:
```
>>> conda create --name act-test python=3.8
>>> conda activate act-test
>>> (act-test) pip install act-atmos==1.0.3
>>> (act-test) python act-test.py
act.__version__ = '1.0.3'
xr.__version__ = '0.18.2'
Initially ds.time.attrs = {'units': 'seconds since 1970-01-01T00:00:00', 'standard_name': 'time', 'long_name': 'time (UTC)'}
After qcfilter ds.time.attrs = {}
```
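A possible interim workaround is sketched below. It assumes the attribute loss happens while the ancillary QC variable is being created; the wrapper name is purely illustrative and is not part of the ACT API, and `act` must be imported so the `qcfilter` accessor is registered.
```
import copy
# Hypothetical wrapper (not part of ACT): snapshot the coordinate attributes,
# run the QC test, then restore anything the accessor dropped.
def add_test_keep_coord_attrs(ds, var_name, **kwargs):
    saved = {name: copy.deepcopy(ds[name].attrs) for name in ds[var_name].coords}
    result = ds.qcfilter.add_test(var_name, **kwargs)
    for name, attrs in saved.items():
        ds[name].attrs.update(attrs)
    return result
```
The in-library fix appears to be constructing the QC `DataArray` with the data variable's `coords` rather than only its `dims`, so the coordinate metadata is carried over.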
|
act/qc/qcfilter.py
<|code_start|>"""
Functions and methods for creating ancillary quality control variables
and filters (masks) which can be used with various corrections
routines in ACT.
"""
import numpy as np
import xarray as xr
import dask
from act.qc import qctests, comparison_tests
@xr.register_dataset_accessor('qcfilter')
class QCFilter(qctests.QCTests, comparison_tests.QCTests, object):
"""
A class for building quality control variables containing arrays for
filtering data based on a set of test condition typically based on the
values in the data fields. These filters can be used in various
algorithms and calculations within ACT.
"""
def __init__(self, xarray_obj):
""" initialize """
self._obj = xarray_obj
def check_for_ancillary_qc(self, var_name, add_if_missing=True,
cleanup=True, flag_type=False):
"""
        Method to check if a quality control variable exists in the dataset
        and return the quality control variable name.
        Will call create_qc_variable() to make the variable if it does not exist
and update_ancillary_variable() to ensure linkage between data and
quality control variable. Can also be used just to get the
corresponding quality control variable name with adding if
it is missing.
Parameters
----------
var_name : str
Data variable name.
add_if_missing : boolean
Add quality control variable if missing from object.
cleanup : boolean
Option to run qc.clean.cleanup() method on the object
to ensure the object was updated from ARM QC to the
correct standardized QC.
flag_type : boolean
Indicating the QC variable uses flag_values instead of
flag_masks.
Returns
-------
qc_var_name : str or None
Name of existing or new quality control variable. Returns
None if no existing quality control variable is found and
add_if_missing is set to False.
"""
qc_var_name = None
try:
ancillary_variables = \
self._obj[var_name].attrs['ancillary_variables']
if isinstance(ancillary_variables, str):
ancillary_variables = ancillary_variables.split()
for var in ancillary_variables:
for attr, value in self._obj[var].attrs.items():
if attr == 'standard_name' and 'quality_flag' in value:
qc_var_name = var
if add_if_missing and qc_var_name is None:
qc_var_name = self._obj.qcfilter.create_qc_variable(
var_name, flag_type=flag_type)
except KeyError:
# Since no ancillary_variables exist look for ARM style of QC
# variable name. If it exists use it else create new
            # QC variable.
if add_if_missing:
try:
self._obj['qc_' + var_name]
qc_var_name = 'qc_' + var_name
except KeyError:
qc_var_name = self._obj.qcfilter.create_qc_variable(
var_name, flag_type=flag_type)
        # Make sure the data variable has a variable attribute linking
# data variable to QC variable.
if add_if_missing:
self._obj.qcfilter.update_ancillary_variable(var_name, qc_var_name)
        # Clean up quality control variables to the required standard in the
# xarray object. If the quality control variables are already cleaned
# the extra work is small since it's just checking.
if cleanup:
self._obj.clean.cleanup(handle_missing_value=True,
link_qc_variables=False)
return qc_var_name
def create_qc_variable(self, var_name, flag_type=False,
flag_values_set_value=0,
qc_var_name=None):
"""
Method to create a quality control variable in the dataset.
Will try not to destroy the qc variable by appending numbers
to the variable name if needed.
Parameters
----------
var_name : str
Data variable name.
flag_type : boolean
If an integer flag type should be created instead of
bitpacked mask type. Will create flag_values instead of
flag_masks.
flag_values_set_value : int
Initial flag value to use when initializing array.
qc_var_name : str
Optional new quality control variable name. If not set
will create one using \\"qc\\_\\" prepended to the data
variable name. If the name given or created is taken
will append a number that does not have a conflict.
Returns
-------
qc_var_name : str
Name of new quality control variable created.
"""
# Make QC variable long name. The variable long_name attribute
# may not exist so catch that error and set to default.
try:
qc_variable_long_name = ('Quality check results on field: ' +
self._obj[var_name].attrs['long_name'])
except KeyError:
qc_variable_long_name = 'Quality check results for ' + var_name
# Make a new quality control variable name. Check if exists in the
# dataset. If so loop through creation of new name until one is
# found that will not replace existing variable.
if qc_var_name is None:
qc_var_name = 'qc_' + var_name
variable_names = list(self._obj.data_vars)
if qc_var_name in variable_names:
for ii in range(1, 100):
temp_qc_var_name = '_'.join([qc_var_name, str(ii)])
if temp_qc_var_name not in variable_names:
qc_var_name = temp_qc_var_name
break
# Create the QC variable filled with 0 values matching the
# shape of data variable.
try:
qc_data = dask.array.from_array(
np.zeros_like(self._obj[var_name].values, dtype=np.int32),
chunks=self._obj[var_name].data.chunksize)
except AttributeError:
qc_data = np.zeros_like(self._obj[var_name].values, dtype=np.int32)
self._obj[qc_var_name] = xr.DataArray(
data=qc_data, dims=self._obj[var_name].dims,
attrs={"long_name": qc_variable_long_name,
"units": '1'}
)
# Update if using flag_values and don't want 0 to be default value.
if flag_type and flag_values_set_value != 0:
self._obj[qc_var_name].values = \
self._obj[qc_var_name].values + int(flag_values_set_value)
        # Add required variable attributes.
if flag_type:
self._obj[qc_var_name].attrs['flag_values'] = []
else:
self._obj[qc_var_name].attrs['flag_masks'] = []
self._obj[qc_var_name].attrs['flag_meanings'] = []
self._obj[qc_var_name].attrs['flag_assessments'] = []
self._obj[qc_var_name].attrs['standard_name'] = 'quality_flag'
self.update_ancillary_variable(var_name, qc_var_name=qc_var_name)
return qc_var_name
def update_ancillary_variable(self, var_name, qc_var_name=None):
"""
Method to check if ancillary_variables variable attribute
is set with quality control variable name.
Parameters
----------
var_name : str
Data variable name.
qc_var_name : str
quality control variable name. If not given will attempt
to get the name from data variable ancillary_variables
attribute.
"""
if qc_var_name is None:
qc_var_name = self._obj.qcfilter.check_for_ancillary_qc(
var_name, add_if_missing=False)
if qc_var_name is None:
return
try:
ancillary_variables = \
self._obj[var_name].attrs['ancillary_variables']
if qc_var_name not in ancillary_variables:
ancillary_variables = ' '.join([ancillary_variables,
qc_var_name])
except KeyError:
ancillary_variables = qc_var_name
self._obj[var_name].attrs['ancillary_variables'] = ancillary_variables
def add_test(self, var_name, index=None, test_number=None,
test_meaning=None, test_assessment='Bad',
flag_value=False):
"""
Method to add a new test/filter to a quality control variable.
Parameters
----------
var_name : str
data variable name
index : int, bool, list of int or bool, numpy array, tuple of numpy arrays
Indexes into quality control array to set the test bit.
If not set or set to None will not set the test on any
element of the quality control variable but will still
add the test to the flag_masks, flag_meanings and
flag_assessments attributes.
test_number : int
Test number to use. If keyword is not set will use first
available test bit/test number.
test_meaning : str
String describing the test. Will be added to flag_meanings
variable attribute.
test_assessment : str
String describing the test assessment. If not set will use
"Bad" as the string to append to flag_assessments. Will
update to be lower case and then capitalized.
flag_value : boolean
Switch to use flag_values integer quality control.
Returns
-------
test_dict : dict
A dictionary containing information added to the QC
variable.
Examples
--------
> result = ds_object.qcfilter.add_test(
var_name, test_meaning='Birds!')
"""
test_dict = {}
if test_meaning is None:
raise ValueError('You need to provide a value for test_meaning '
'keyword when calling the add_test method')
# This ensures the indexing will work even if given float values.
# Preserves tuples from np.where() or boolean arrays for standard
# python indexing.
if index is not None and not isinstance(index, (np.ndarray, tuple)):
index = np.array(index)
if index.dtype.kind == 'f':
index = index.astype(int)
# Ensure assessment is lowercase and capitalized to be consistent
test_assessment = test_assessment.lower().capitalize()
qc_var_name = self._obj.qcfilter.check_for_ancillary_qc(
var_name, flag_type=flag_value)
if test_number is None:
test_number = self._obj.qcfilter.available_bit(
qc_var_name)
self._obj.qcfilter.set_test(var_name, index, test_number, flag_value)
if flag_value:
try:
self._obj[qc_var_name].attrs['flag_values'].append(test_number)
except KeyError:
self._obj[qc_var_name].attrs['flag_values'] = [test_number]
else:
try:
if isinstance(self._obj[qc_var_name].attrs['flag_masks'], list):
self._obj[qc_var_name].attrs['flag_masks'].append(set_bit(0, test_number))
else:
flag_masks = np.append(self._obj[qc_var_name].attrs['flag_masks'],
set_bit(0, test_number))
self._obj[qc_var_name].attrs['flag_masks'] = flag_masks
except KeyError:
self._obj[qc_var_name].attrs['flag_masks'] = [set_bit(0, test_number)]
try:
self._obj[qc_var_name].attrs['flag_meanings'].append(test_meaning)
except KeyError:
self._obj[qc_var_name].attrs['flag_meanings'] = [test_meaning]
try:
self._obj[qc_var_name].attrs['flag_assessments'].append(test_assessment)
except KeyError:
self._obj[qc_var_name].attrs['flag_assessments'] = [test_assessment]
test_dict['test_number'] = test_number
test_dict['test_meaning'] = test_meaning
test_dict['test_assessment'] = test_assessment
test_dict['qc_variable_name'] = qc_var_name
test_dict['variable_name'] = var_name
return test_dict
def remove_test(self, var_name, test_number=None, flag_value=False,
flag_values_reset_value=0):
"""
Method to remove a test/filter from a quality control variable.
Parameters
----------
var_name : str
Data variable name.
test_number : int
Test number to remove.
flag_value : boolean
Switch to use flag_values integer quality control.
flag_values_reset_value : int
Value to use when resetting a flag_values value to not be set.
Examples
--------
> ds_object.qcfilter.remove_test(
var_name, test_number=3)
"""
if test_number is None:
            raise ValueError('You need to provide a value for test_number '
                             'keyword when calling the remove_test method')
qc_var_name = self._obj.qcfilter.check_for_ancillary_qc(var_name)
# Determine which index is using the test number
index = None
if flag_value:
flag_values = self._obj[qc_var_name].attrs['flag_values']
for ii, flag_num in enumerate(flag_values):
if flag_num == test_number:
index = ii
break
else:
flag_masks = self._obj[qc_var_name].attrs['flag_masks']
for ii, bit_num in enumerate(flag_masks):
if parse_bit(bit_num)[0] == test_number:
index = ii
break
# If can't find the index of test return before doing anything.
if index is None:
return
if flag_value:
remove_index = self._obj.qcfilter.get_qc_test_mask(
var_name, test_number, return_index=True, flag_value=True)
self._obj.qcfilter.unset_test(var_name, remove_index, test_number,
flag_value, flag_values_reset_value)
del flag_values[index]
self._obj[qc_var_name].attrs['flag_values'] = flag_values
else:
remove_index = self._obj.qcfilter.get_qc_test_mask(
var_name, test_number, return_index=True)
self._obj.qcfilter.unset_test(var_name, remove_index, test_number,
flag_value, flag_values_reset_value)
del flag_masks[index]
self._obj[qc_var_name].attrs['flag_masks'] = flag_masks
flag_meanings = self._obj[qc_var_name].attrs['flag_meanings']
del flag_meanings[index]
self._obj[qc_var_name].attrs['flag_meanings'] = flag_meanings
flag_assessments = self._obj[qc_var_name].attrs['flag_assessments']
del flag_assessments[index]
self._obj[qc_var_name].attrs['flag_assessments'] = flag_assessments
def set_test(self, var_name, index=None, test_number=None,
flag_value=False):
"""
Method to set a test/filter in a quality control variable.
Parameters
----------
var_name : str
Data variable name.
index : int or list or numpy array
            Index to set the test in the quality control array. If you want
            to set all values you will need to pass in the index of all values.
test_number : int
Test number to set.
flag_value : boolean
Switch to use flag_values integer quality control.
Examples
--------
.. code-block:: python
index = [0, 1, 2, 30]
ds_object.qcfilter.set_test(
var_name, index=index, test_number=2)
"""
if index is None:
return
qc_var_name = self._obj.qcfilter.check_for_ancillary_qc(var_name)
qc_variable = np.array(self._obj[qc_var_name].values)
if index is not None:
if flag_value:
qc_variable[index] = test_number
else:
qc_variable[index] = set_bit(qc_variable[index], test_number)
self._obj[qc_var_name].values = qc_variable
def unset_test(self, var_name, index=None, test_number=None,
flag_value=False, flag_values_reset_value=0):
"""
Method to unset a test/filter from a quality control variable.
Parameters
----------
var_name : str
Data variable name.
index : int or list or numpy array
            Index to unset the test in the quality control array. If you want
            to unset all values you will need to pass in the index of all values.
test_number : int
Test number to remove.
flag_value : boolean
Switch to use flag_values integer quality control.
flag_values_reset_value : int
Value to use when resetting a flag_values value to not be set.
Examples
--------
.. code-block:: python
ds_object.qcfilter.unset_test(
var_name, index=0, test_number=2)
"""
if index is None:
return
qc_var_name = self._obj.qcfilter.check_for_ancillary_qc(var_name)
qc_variable = self._obj[qc_var_name].values
if flag_value:
qc_variable[index] = flag_values_reset_value
else:
qc_variable[index] = unset_bit(qc_variable[index], test_number)
self._obj[qc_var_name].values = qc_variable
def available_bit(self, qc_var_name, recycle=False):
"""
Method to determine next available bit or flag to use with a QC test.
Will check for flag_masks first and if not found will check for
flag_values. This will drive how the next value is chosen.
Parameters
----------
qc_var_name : str
Quality control variable name.
recycle : boolean
Option to look for a bit (test) not in use starting from 1.
If a test is not defined will return the lowest number, else
will just use next highest number.
Returns
-------
test_num : int
Next available test number.
"""
try:
flag_masks = self._obj[qc_var_name].attrs['flag_masks']
flag_value = False
except KeyError:
try:
flag_masks = self._obj[qc_var_name].attrs['flag_values']
flag_value = True
except KeyError:
try:
self._obj[qc_var_name].attrs['flag_values']
flag_masks = self._obj[qc_var_name].attrs['flag_masks']
flag_value = False
except KeyError:
raise ValueError('Problem getting next value from '
'available_bit(). flag_values and '
'flag_masks not set as expected')
if flag_masks == []:
next_bit = 1
else:
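            # flag_values store the test number directly; flag_masks store
            # 2**(test_number - 1), so masks are parsed back to bit positions below.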
if flag_value:
if recycle:
next_bit = min(set(range(1, 100000)) - set(flag_masks))
else:
next_bit = max(flag_masks) + 1
else:
if recycle:
tests = [parse_bit(mask)[0] for mask in flag_masks]
next_bit = min(set(range(1, 63)) - set(tests))
else:
next_bit = parse_bit(max(flag_masks))[0] + 1
return int(next_bit)
def get_qc_test_mask(self, var_name, test_number, flag_value=False,
return_index=False):
"""
Returns a numpy array of False or True where a particular
flag or bit is set in a numpy array.
Parameters
----------
var_name : str
Data variable name.
test_number : int
Test number to return array where test is set.
flag_value : boolean
Switch to use flag_values integer quality control.
return_index : boolean
Return a numpy array of index numbers into QC array where the
test is set instead of 0 or 1 mask.
Returns
-------
test_mask : bool array
A numpy boolean array with False or True where the test number or
bit was set.
Examples
--------
.. code-block:: python
from act.io.armfiles import read_netcdf
from act.tests import EXAMPLE_IRT25m20s
ds_object = read_netcdf(EXAMPLE_IRT25m20s)
var_name = 'inst_up_long_dome_resist'
result = ds_object.qcfilter.add_test(
var_name, index=[0, 1, 2], test_meaning='Birds!')
qc_var_name = result['qc_variable_name']
mask = ds_object.qcfilter.get_qc_test_mask(
var_name, result['test_number'], return_index=True)
print(mask)
array([0, 1, 2])
mask = ds_object.qcfilter.get_qc_test_mask(
var_name, result['test_number'])
print(mask)
array([ True, True, True, ..., False, False, False])
data = ds_object[var_name].values
print(data[mask])
array([7.84 , 7.8777, 7.8965], dtype=float32)
import numpy as np
data[mask] = np.nan
print(data)
array([ nan, nan, nan, ..., 7.6705, 7.6892, 7.6892],
dtype=float32)
"""
qc_var_name = self._obj.qcfilter.check_for_ancillary_qc(var_name)
qc_variable = self._obj[qc_var_name].values
if flag_value:
tripped = np.where(qc_variable == test_number)
else:
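            # AND the bit-packed QC values with a single-bit mask for this test;
            # any non-zero result means the test is set for that sample.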
check_bit = set_bit(0, test_number) & qc_variable
tripped = np.where(check_bit > 0)
test_mask = np.zeros(qc_variable.shape, dtype='int')
        # Make sure test_mask is an array. If qc_variable is a scalar it will
        # be returned from np.zeros as a scalar.
test_mask = np.atleast_1d(test_mask)
test_mask[tripped] = 1
test_mask = np.ma.make_mask(test_mask, shrink=False)
if return_index:
test_mask = np.where(test_mask)[0]
return test_mask
def get_masked_data(self, var_name, rm_assessments=None,
rm_tests=None, return_nan_array=False,
ma_fill_value=None, return_inverse=False):
"""
Returns a numpy masked array containing data and mask or
a numpy float array with masked values set to NaN.
Parameters
----------
var_name : str
Data variable name.
rm_assessments : str or list of str
Assessment name to exclude from returned data.
rm_tests : int or list of int
Test numbers to exclude from returned data. This is the test
number (or bit position number) not the mask number.
return_nan_array : boolean
            Return a numpy array with filtered (or masked) values
set to numpy NaN value. If the data is type int will upconvert
to numpy float to allow setting NaN value.
ma_fill_value : int or float (or str?)
            The numpy masked array fill_value used in creation of the
masked array. If the datatype needs to be upconverted to allow
the fill value to be used, data will be upconverted.
return_inverse : boolean
Invert the masked array mask or return data array where mask is set
to False instead of True set to NaN. Useful for overplotting
where failing.
Returns
-------
variable : numpy masked array or numpy float array
Default is to return a numpy masked array with the mask set to
True where the test with requested assessment or test number
was found set.
If return_nan_array is True will return numpy array upconverted
to float with locations where the test with requested assessment
or test number was found set converted to NaN.
Examples
--------
.. code-block:: python
from act.io.armfiles import read_netcdf
from act.tests import EXAMPLE_IRT25m20s
ds_object = read_netcdf(EXAMPLE_IRT25m20s)
var_name = 'inst_up_long_dome_resist'
result = ds_object.qcfilter.add_test(
var_name, index=[0, 1, 2], test_meaning='Birds!')
data = ds_object.qcfilter.get_masked_data(var_name,
rm_assessments=['Bad', 'Indeterminate'])
print(data)
masked_array(data=[--, --, --, ..., 7.670499801635742,
7.689199924468994, 7.689199924468994],
mask=[ True, True, True, ..., False, False, False],
fill_value=1e+20, dtype=float32)
"""
qc_var_name = self._obj.qcfilter.check_for_ancillary_qc(
var_name, add_if_missing=False)
flag_value = False
flag_values = None
flag_masks = None
flag_assessments = None
try:
flag_assessments = self._obj[qc_var_name].attrs['flag_assessments']
flag_masks = self._obj[qc_var_name].attrs['flag_masks']
except KeyError:
pass
try:
flag_values = self._obj[qc_var_name].attrs['flag_values']
flag_value = True
except KeyError:
pass
test_numbers = []
if rm_tests is not None:
if isinstance(rm_tests, (int, float, str)):
rm_tests = [int(rm_tests)]
test_numbers.extend(rm_tests)
if rm_assessments is not None:
if isinstance(rm_assessments, str):
rm_assessments = [rm_assessments]
if flag_masks is not None:
test_nums = [parse_bit(mask)[0] for mask in flag_masks]
if flag_values is not None:
test_nums = flag_values
rm_assessments = [x.lower() for x in rm_assessments]
if flag_assessments is not None:
for ii, assessment in enumerate(flag_assessments):
if assessment.lower() in rm_assessments:
test_numbers.append(test_nums[ii])
# Make the list of test numbers to mask unique
test_numbers = list(set(test_numbers))
# Create mask of indexes by looking where each test is set
variable = self._obj[var_name].values
nan_dtype = np.float32
if variable.dtype in (np.float64, np.int64):
nan_dtype = np.float64
mask = np.zeros(variable.shape, dtype=bool)
for test in test_numbers:
mask = mask | self._obj.qcfilter.get_qc_test_mask(
var_name, test, flag_value=flag_value)
# Convert data numpy array into masked array
try:
variable = np.ma.array(variable, mask=mask,
fill_value=ma_fill_value)
except TypeError:
variable = np.ma.array(variable, mask=mask,
fill_value=ma_fill_value,
dtype=np.array(ma_fill_value).dtype)
        # If requested, switch the mask from where data is not failing tests
        # to where data is failing tests. This can be used when overplotting
        # where the data is failing the tests.
if return_inverse:
mask = variable.mask
mask = np.invert(mask)
variable.mask = mask
# If asked to return numpy array with values set to NaN
if return_nan_array:
variable = variable.astype(nan_dtype)
variable = variable.filled(fill_value=np.nan)
return variable
def datafilter(self, variables=None, rm_assessments=None, rm_tests=None,
np_ma=True, verbose=False, del_qc_var=True):
"""
Method to apply quality control variables to data variables by
changing the data values in the dataset using quality control variables.
        The data variable is changed to a numpy masked array with failing
        data masked or, if requested, to a numpy array with failing data set to
NaN. This can be used to update the data variable in the xarray
dataset for use with xarray methods to perform analysis on the data
since those methods don't read the quality control variables.
Parameters
----------
variables : None or str or list of str
Data variable names to process
rm_assessments : str or list of str
            Assessment names listed under the quality control variable flag_assessments
to exclude from returned data. Examples include
['Bad', 'Incorrect', 'Indeterminate', 'Suspect']
rm_tests : int or list of int
Test numbers listed under quality control variable to exclude from
returned data. This is the test
number (or bit position number) not the mask number.
np_ma : boolean
            Should the data in the xarray DataArray be set to numpy masked
            arrays. This should work with most xarray methods. If the xarray
            processing method does not work with numpy masked arrays, set to
            False to use NaN.
verbose : boolean
Print processing information.
del_qc_var : boolean
            Option to delete the quality control variable after processing. Since
the data values can not be determined after they are set to NaN
and xarray method processing would also process the quality control
variables, the default is to remove the quality control data
variables. If numpy masked arrays are used the data are not lost
but would need to be extracted and set to DataArray to return the
dataset back to original state.
Examples
--------
.. code-block:: python
from act.io.armfiles import read_netcdf
from act.tests import EXAMPLE_MET1
ds = read_netcdf(EXAMPLE_MET1)
ds.clean.cleanup()
var_name = 'atmos_pressure'
ds_1 = ds.mean()
ds.qcfilter.add_less_test(var_name, 99, test_assessment='Bad')
ds.qcfilter.datafilter(rm_assessments='Bad')
ds_2 = ds.mean()
print(f'All data: {ds_1[var_name].values}, Bad Removed: {ds_2[var_name].values}')
All data: 98.86097717285156, Bad Removed: 99.15148162841797
"""
if variables is not None and isinstance(variables, str):
variables = [variables]
if variables is None:
variables = list(self._obj.data_vars)
for var_name in variables:
qc_var_name = self.check_for_ancillary_qc(var_name,
add_if_missing=False,
cleanup=False)
if qc_var_name is None:
if verbose:
print(f'No quality control variable for {var_name} found '
f'in call to .qcfilter.datafilter()')
continue
data = self.get_masked_data(var_name, rm_assessments=rm_assessments,
rm_tests=rm_tests, ma_fill_value=np_ma)
self._obj[var_name].values = data
if del_qc_var:
del self._obj[qc_var_name]
if verbose:
print(f'Deleting {qc_var_name} from dataset')
def set_bit(array, bit_number):
"""
    Function to set a quality control bit given a scalar or
array of values and a bit number.
Parameters
----------
array : int or numpy array
The bitpacked array to set the bit number.
bit_number : int
The bit (or test) number to set.
Returns
-------
array : int, numpy array, tuple, list
Integer or numpy array with bit set for each element of the array.
Returned in same type.
Examples
--------
Example use setting bit 2 to an array called data:
.. code-block:: python
data = np.array(range(0, 7))
data = set_bit(data, 2)
print(data)
array([2, 3, 2, 3, 6, 7, 6])
"""
was_list = False
was_tuple = False
if isinstance(array, list):
array = np.array(array)
was_list = True
if isinstance(array, tuple):
array = np.array(array)
was_tuple = True
if bit_number > 0:
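        # Build a mask with only bit (bit_number - 1) set and OR it into the data.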
array |= (1 << bit_number - 1)
if was_list:
array = list(array)
if was_tuple:
array = tuple(array)
return array
def unset_bit(array, bit_number):
"""
Function to remove a quality control bit given a
scalar or array of values and a bit number.
Parameters
----------
array : int or numpy array
Array of integers containing bit packed numbers.
bit_number : int
Bit number to remove.
Returns
-------
array : int or numpy array
Returns same data type as array entered with bit removed. Will
fail gracefully if the bit requested to be removed was not set.
Examples
--------
Example use removing bit 2 from an array called data:
> data = set_bit(0,2)
> data = set_bit(data,3)
> data
6
> data = unset_bit(data,2)
> data
4
"""
was_list = False
was_tuple = False
if isinstance(array, list):
array = np.array(array)
was_list = True
if isinstance(array, tuple):
array = np.array(array)
was_tuple = True
if bit_number > 0:
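        # AND with the complement of the single-bit mask to clear that bit.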
array = array & ~ (1 << bit_number - 1)
if was_list:
array = list(array)
if was_tuple:
array = tuple(array)
return array
def parse_bit(qc_bit):
"""
Given a single integer value, return bit positions.
Parameters
----------
qc_bit : int or numpy int
Bit packed integer number to be parsed.
Returns
-------
bit_number : numpy.int32 array
Array containing all bit numbers of the bit packed number.
If no bits set returns empty array.
Examples
--------
> parse_bit(7)
array([1, 2, 3])
"""
if isinstance(qc_bit, (list, tuple, np.ndarray)):
if len(qc_bit) > 1:
raise ValueError("Must be a single value.")
qc_bit = qc_bit[0]
if qc_bit < 0:
raise ValueError("Must be a positive integer.")
bit_number = []
# if qc_bit == 0:
# bit_number.append(0)
counter = 0
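    # Peel off the least significant bit each pass, recording 1-based bit positions.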
while qc_bit > 0:
temp_value = qc_bit % 2
qc_bit = qc_bit >> 1
counter += 1
if temp_value == 1:
bit_number.append(counter)
bit_number = np.asarray(bit_number, dtype=np.int32)
return bit_number
<|code_end|>
|
act/qc/qcfilter.py
<|code_start|>"""
Functions and methods for creating ancillary quality control variables
and filters (masks) which can be used with various corrections
routines in ACT.
"""
import numpy as np
import xarray as xr
import dask
from act.qc import qctests, comparison_tests
@xr.register_dataset_accessor('qcfilter')
class QCFilter(qctests.QCTests, comparison_tests.QCTests, object):
"""
A class for building quality control variables containing arrays for
filtering data based on a set of test condition typically based on the
values in the data fields. These filters can be used in various
algorithms and calculations within ACT.
"""
def __init__(self, xarray_obj):
""" initialize """
self._obj = xarray_obj
def check_for_ancillary_qc(self, var_name, add_if_missing=True,
cleanup=True, flag_type=False):
"""
        Method to check if a quality control variable exists in the dataset
        and return the quality control variable name.
        Will call create_qc_variable() to make the variable if it does not exist
and update_ancillary_variable() to ensure linkage between data and
quality control variable. Can also be used just to get the
corresponding quality control variable name with adding if
it is missing.
Parameters
----------
var_name : str
Data variable name.
add_if_missing : boolean
Add quality control variable if missing from object.
cleanup : boolean
Option to run qc.clean.cleanup() method on the object
to ensure the object was updated from ARM QC to the
correct standardized QC.
flag_type : boolean
Indicating the QC variable uses flag_values instead of
flag_masks.
Returns
-------
qc_var_name : str or None
Name of existing or new quality control variable. Returns
None if no existing quality control variable is found and
add_if_missing is set to False.
"""
qc_var_name = None
try:
ancillary_variables = \
self._obj[var_name].attrs['ancillary_variables']
if isinstance(ancillary_variables, str):
ancillary_variables = ancillary_variables.split()
for var in ancillary_variables:
for attr, value in self._obj[var].attrs.items():
if attr == 'standard_name' and 'quality_flag' in value:
qc_var_name = var
if add_if_missing and qc_var_name is None:
qc_var_name = self._obj.qcfilter.create_qc_variable(
var_name, flag_type=flag_type)
except KeyError:
# Since no ancillary_variables exist look for ARM style of QC
# variable name. If it exists use it else create new
            # QC variable.
if add_if_missing:
try:
self._obj['qc_' + var_name]
qc_var_name = 'qc_' + var_name
except KeyError:
qc_var_name = self._obj.qcfilter.create_qc_variable(
var_name, flag_type=flag_type)
        # Make sure the data variable has a variable attribute linking
# data variable to QC variable.
if add_if_missing:
self._obj.qcfilter.update_ancillary_variable(var_name, qc_var_name)
        # Clean up quality control variables to the required standard in the
# xarray object. If the quality control variables are already cleaned
# the extra work is small since it's just checking.
if cleanup:
self._obj.clean.cleanup(handle_missing_value=True,
link_qc_variables=False)
return qc_var_name
def create_qc_variable(self, var_name, flag_type=False,
flag_values_set_value=0,
qc_var_name=None):
"""
Method to create a quality control variable in the dataset.
Will try not to destroy the qc variable by appending numbers
to the variable name if needed.
Parameters
----------
var_name : str
Data variable name.
flag_type : boolean
If an integer flag type should be created instead of
bitpacked mask type. Will create flag_values instead of
flag_masks.
flag_values_set_value : int
Initial flag value to use when initializing array.
qc_var_name : str
Optional new quality control variable name. If not set
will create one using \\"qc\\_\\" prepended to the data
variable name. If the name given or created is taken
will append a number that does not have a conflict.
Returns
-------
qc_var_name : str
Name of new quality control variable created.
"""
# Make QC variable long name. The variable long_name attribute
# may not exist so catch that error and set to default.
try:
qc_variable_long_name = ('Quality check results on field: ' +
self._obj[var_name].attrs['long_name'])
except KeyError:
qc_variable_long_name = 'Quality check results for ' + var_name
# Make a new quality control variable name. Check if exists in the
# dataset. If so loop through creation of new name until one is
# found that will not replace existing variable.
if qc_var_name is None:
qc_var_name = 'qc_' + var_name
variable_names = list(self._obj.data_vars)
if qc_var_name in variable_names:
for ii in range(1, 100):
temp_qc_var_name = '_'.join([qc_var_name, str(ii)])
if temp_qc_var_name not in variable_names:
qc_var_name = temp_qc_var_name
break
# Create the QC variable filled with 0 values matching the
# shape of data variable.
try:
qc_data = dask.array.from_array(
np.zeros_like(self._obj[var_name].values, dtype=np.int32),
chunks=self._obj[var_name].data.chunksize)
except AttributeError:
qc_data = np.zeros_like(self._obj[var_name].values, dtype=np.int32)
        # Updating to use coords instead of dims, which caused a loss of
        # attributes as noted in Issue 347
self._obj[qc_var_name] = xr.DataArray(
data=qc_data, coords=self._obj[var_name].coords,
attrs={"long_name": qc_variable_long_name,
"units": '1'}
)
# Update if using flag_values and don't want 0 to be default value.
if flag_type and flag_values_set_value != 0:
self._obj[qc_var_name].values = \
self._obj[qc_var_name].values + int(flag_values_set_value)
        # Add required variable attributes.
if flag_type:
self._obj[qc_var_name].attrs['flag_values'] = []
else:
self._obj[qc_var_name].attrs['flag_masks'] = []
self._obj[qc_var_name].attrs['flag_meanings'] = []
self._obj[qc_var_name].attrs['flag_assessments'] = []
self._obj[qc_var_name].attrs['standard_name'] = 'quality_flag'
self.update_ancillary_variable(var_name, qc_var_name=qc_var_name)
return qc_var_name
def update_ancillary_variable(self, var_name, qc_var_name=None):
"""
Method to check if ancillary_variables variable attribute
is set with quality control variable name.
Parameters
----------
var_name : str
Data variable name.
qc_var_name : str
quality control variable name. If not given will attempt
to get the name from data variable ancillary_variables
attribute.
"""
if qc_var_name is None:
qc_var_name = self._obj.qcfilter.check_for_ancillary_qc(
var_name, add_if_missing=False)
if qc_var_name is None:
return
try:
ancillary_variables = \
self._obj[var_name].attrs['ancillary_variables']
if qc_var_name not in ancillary_variables:
ancillary_variables = ' '.join([ancillary_variables,
qc_var_name])
except KeyError:
ancillary_variables = qc_var_name
self._obj[var_name].attrs['ancillary_variables'] = ancillary_variables
def add_test(self, var_name, index=None, test_number=None,
test_meaning=None, test_assessment='Bad',
flag_value=False):
"""
Method to add a new test/filter to a quality control variable.
Parameters
----------
var_name : str
data variable name
index : int, bool, list of int or bool, numpy array, tuple of numpy arrays
Indexes into quality control array to set the test bit.
If not set or set to None will not set the test on any
element of the quality control variable but will still
add the test to the flag_masks, flag_meanings and
flag_assessments attributes.
test_number : int
Test number to use. If keyword is not set will use first
available test bit/test number.
test_meaning : str
String describing the test. Will be added to flag_meanings
variable attribute.
test_assessment : str
String describing the test assessment. If not set will use
"Bad" as the string to append to flag_assessments. Will
update to be lower case and then capitalized.
flag_value : boolean
Switch to use flag_values integer quality control.
Returns
-------
test_dict : dict
A dictionary containing information added to the QC
variable.
Examples
--------
> result = ds_object.qcfilter.add_test(
var_name, test_meaning='Birds!')
"""
test_dict = {}
if test_meaning is None:
raise ValueError('You need to provide a value for test_meaning '
'keyword when calling the add_test method')
# This ensures the indexing will work even if given float values.
# Preserves tuples from np.where() or boolean arrays for standard
# python indexing.
if index is not None and not isinstance(index, (np.ndarray, tuple)):
index = np.array(index)
if index.dtype.kind == 'f':
index = index.astype(int)
# Ensure assessment is lowercase and capitalized to be consistent
test_assessment = test_assessment.lower().capitalize()
qc_var_name = self._obj.qcfilter.check_for_ancillary_qc(
var_name, flag_type=flag_value)
if test_number is None:
test_number = self._obj.qcfilter.available_bit(
qc_var_name)
self._obj.qcfilter.set_test(var_name, index, test_number, flag_value)
if flag_value:
try:
self._obj[qc_var_name].attrs['flag_values'].append(test_number)
except KeyError:
self._obj[qc_var_name].attrs['flag_values'] = [test_number]
else:
try:
if isinstance(self._obj[qc_var_name].attrs['flag_masks'], list):
self._obj[qc_var_name].attrs['flag_masks'].append(set_bit(0, test_number))
else:
flag_masks = np.append(self._obj[qc_var_name].attrs['flag_masks'],
set_bit(0, test_number))
self._obj[qc_var_name].attrs['flag_masks'] = flag_masks
except KeyError:
self._obj[qc_var_name].attrs['flag_masks'] = [set_bit(0, test_number)]
try:
self._obj[qc_var_name].attrs['flag_meanings'].append(test_meaning)
except KeyError:
self._obj[qc_var_name].attrs['flag_meanings'] = [test_meaning]
try:
self._obj[qc_var_name].attrs['flag_assessments'].append(test_assessment)
except KeyError:
self._obj[qc_var_name].attrs['flag_assessments'] = [test_assessment]
test_dict['test_number'] = test_number
test_dict['test_meaning'] = test_meaning
test_dict['test_assessment'] = test_assessment
test_dict['qc_variable_name'] = qc_var_name
test_dict['variable_name'] = var_name
return test_dict
def remove_test(self, var_name, test_number=None, flag_value=False,
flag_values_reset_value=0):
"""
Method to remove a test/filter from a quality control variable.
Parameters
----------
var_name : str
Data variable name.
test_number : int
Test number to remove.
flag_value : boolean
Switch to use flag_values integer quality control.
flag_values_reset_value : int
Value to use when resetting a flag_values value to not be set.
Examples
--------
> ds_object.qcfilter.remove_test(
var_name, test_number=3)
"""
if test_number is None:
            raise ValueError('You need to provide a value for test_number '
                             'keyword when calling the remove_test method')
qc_var_name = self._obj.qcfilter.check_for_ancillary_qc(var_name)
# Determine which index is using the test number
index = None
if flag_value:
flag_values = self._obj[qc_var_name].attrs['flag_values']
for ii, flag_num in enumerate(flag_values):
if flag_num == test_number:
index = ii
break
else:
flag_masks = self._obj[qc_var_name].attrs['flag_masks']
for ii, bit_num in enumerate(flag_masks):
if parse_bit(bit_num)[0] == test_number:
index = ii
break
# If can't find the index of test return before doing anything.
if index is None:
return
if flag_value:
remove_index = self._obj.qcfilter.get_qc_test_mask(
var_name, test_number, return_index=True, flag_value=True)
self._obj.qcfilter.unset_test(var_name, remove_index, test_number,
flag_value, flag_values_reset_value)
del flag_values[index]
self._obj[qc_var_name].attrs['flag_values'] = flag_values
else:
remove_index = self._obj.qcfilter.get_qc_test_mask(
var_name, test_number, return_index=True)
self._obj.qcfilter.unset_test(var_name, remove_index, test_number,
flag_value, flag_values_reset_value)
del flag_masks[index]
self._obj[qc_var_name].attrs['flag_masks'] = flag_masks
flag_meanings = self._obj[qc_var_name].attrs['flag_meanings']
del flag_meanings[index]
self._obj[qc_var_name].attrs['flag_meanings'] = flag_meanings
flag_assessments = self._obj[qc_var_name].attrs['flag_assessments']
del flag_assessments[index]
self._obj[qc_var_name].attrs['flag_assessments'] = flag_assessments
def set_test(self, var_name, index=None, test_number=None,
flag_value=False):
"""
Method to set a test/filter in a quality control variable.
Parameters
----------
var_name : str
Data variable name.
index : int or list or numpy array
Index to set the test in the quality control array. To set the test
on all values, pass in the index of all values.
test_number : int
Test number to set.
flag_value : boolean
Switch to use flag_values integer quality control.
Examples
--------
.. code-block:: python
index = [0, 1, 2, 30]
ds_object.qcfilter.set_test(
var_name, index=index, test_number=2)
"""
if index is None:
return
qc_var_name = self._obj.qcfilter.check_for_ancillary_qc(var_name)
qc_variable = np.array(self._obj[qc_var_name].values)
if index is not None:
if flag_value:
qc_variable[index] = test_number
else:
qc_variable[index] = set_bit(qc_variable[index], test_number)
self._obj[qc_var_name].values = qc_variable
def unset_test(self, var_name, index=None, test_number=None,
flag_value=False, flag_values_reset_value=0):
"""
Method to unset a test/filter from a quality control variable.
Parameters
----------
var_name : str
Data variable name.
index : int or list or numpy array
Index to unset the test in the quality control array. To unset the test
on all values, pass in the index of all values.
test_number : int
Test number to remove.
flag_value : boolean
Switch to use flag_values integer quality control.
flag_values_reset_value : int
Value to use when resetting a flag_values value to not be set.
Examples
--------
.. code-block:: python
ds_object.qcfilter.unset_test(
var_name, index=0, test_number=2)
"""
if index is None:
return
qc_var_name = self._obj.qcfilter.check_for_ancillary_qc(var_name)
qc_variable = self._obj[qc_var_name].values
if flag_value:
qc_variable[index] = flag_values_reset_value
else:
qc_variable[index] = unset_bit(qc_variable[index], test_number)
self._obj[qc_var_name].values = qc_variable
def available_bit(self, qc_var_name, recycle=False):
"""
Method to determine next available bit or flag to use with a QC test.
Will check for flag_masks first and if not found will check for
flag_values. This will drive how the next value is chosen.
Parameters
----------
qc_var_name : str
Quality control variable name.
recycle : boolean
Option to look for a bit (test) not in use starting from 1.
If a test is not defined will return the lowest number, else
will just use next highest number.
Returns
-------
test_num : int
Next available test number.
"""
try:
flag_masks = self._obj[qc_var_name].attrs['flag_masks']
flag_value = False
except KeyError:
try:
flag_masks = self._obj[qc_var_name].attrs['flag_values']
flag_value = True
except KeyError:
try:
self._obj[qc_var_name].attrs['flag_values']
flag_masks = self._obj[qc_var_name].attrs['flag_masks']
flag_value = False
except KeyError:
raise ValueError('Problem getting next value from '
'available_bit(). flag_values and '
'flag_masks not set as expected')
if flag_masks == []:
next_bit = 1
else:
if flag_value:
if recycle:
next_bit = min(set(range(1, 100000)) - set(flag_masks))
else:
next_bit = max(flag_masks) + 1
else:
if recycle:
tests = [parse_bit(mask)[0] for mask in flag_masks]
next_bit = min(set(range(1, 63)) - set(tests))
else:
next_bit = parse_bit(max(flag_masks))[0] + 1
return int(next_bit)
def get_qc_test_mask(self, var_name, test_number, flag_value=False,
return_index=False):
"""
Returns a numpy array of False or True where a particular
flag or bit is set in a numpy array.
Parameters
----------
var_name : str
Data variable name.
test_number : int
Test number to return array where test is set.
flag_value : boolean
Switch to use flag_values integer quality control.
return_index : boolean
Return a numpy array of index numbers into QC array where the
test is set instead of 0 or 1 mask.
Returns
-------
test_mask : bool array
A numpy boolean array with False or True where the test number or
bit was set.
Examples
--------
.. code-block:: python
from act.io.armfiles import read_netcdf
from act.tests import EXAMPLE_IRT25m20s
ds_object = read_netcdf(EXAMPLE_IRT25m20s)
var_name = 'inst_up_long_dome_resist'
result = ds_object.qcfilter.add_test(
var_name, index=[0, 1, 2], test_meaning='Birds!')
qc_var_name = result['qc_variable_name']
mask = ds_object.qcfilter.get_qc_test_mask(
var_name, result['test_number'], return_index=True)
print(mask)
array([0, 1, 2])
mask = ds_object.qcfilter.get_qc_test_mask(
var_name, result['test_number'])
print(mask)
array([ True, True, True, ..., False, False, False])
data = ds_object[var_name].values
print(data[mask])
array([7.84 , 7.8777, 7.8965], dtype=float32)
import numpy as np
data[mask] = np.nan
print(data)
array([ nan, nan, nan, ..., 7.6705, 7.6892, 7.6892],
dtype=float32)
"""
qc_var_name = self._obj.qcfilter.check_for_ancillary_qc(var_name)
qc_variable = self._obj[qc_var_name].values
if flag_value:
tripped = np.where(qc_variable == test_number)
else:
check_bit = set_bit(0, test_number) & qc_variable
tripped = np.where(check_bit > 0)
test_mask = np.zeros(qc_variable.shape, dtype='int')
# Make sure test_mask is an array. If qc_variable is scalar will
# be returned from np.zeros as scalar.
test_mask = np.atleast_1d(test_mask)
test_mask[tripped] = 1
test_mask = np.ma.make_mask(test_mask, shrink=False)
if return_index:
test_mask = np.where(test_mask)[0]
return test_mask
def get_masked_data(self, var_name, rm_assessments=None,
rm_tests=None, return_nan_array=False,
ma_fill_value=None, return_inverse=False):
"""
Returns a numpy masked array containing data and mask or
a numpy float array with masked values set to NaN.
Parameters
----------
var_name : str
Data variable name.
rm_assessments : str or list of str
Assessment name to exclude from returned data.
rm_tests : int or list of int
Test numbers to exclude from returned data. This is the test
number (or bit position number) not the mask number.
return_nan_array : boolean
Return a numpy array with filtered (or masked) values
set to numpy NaN value. If the data is type int will upconvert
to numpy float to allow setting NaN value.
ma_fill_value : int or float (or str?)
The numpy masked array fill_value used in creation of the
masked array. If the datatype needs to be upconverted to allow
the fill value to be used, data will be upconverted.
return_inverse : boolean
Invert the masked array mask, or return a data array where the mask is
set to False instead of True, so the values failing the tests are the
ones left unmasked. Useful for overplotting the failing data.
Returns
-------
variable : numpy masked array or numpy float array
Default is to return a numpy masked array with the mask set to
True where the test with requested assessment or test number
was found set.
If return_nan_array is True will return numpy array upconverted
to float with locations where the test with requested assessment
or test number was found set converted to NaN.
Examples
--------
.. code-block:: python
from act.io.armfiles import read_netcdf
from act.tests import EXAMPLE_IRT25m20s
ds_object = read_netcdf(EXAMPLE_IRT25m20s)
var_name = 'inst_up_long_dome_resist'
result = ds_object.qcfilter.add_test(
var_name, index=[0, 1, 2], test_meaning='Birds!')
data = ds_object.qcfilter.get_masked_data(var_name,
rm_assessments=['Bad', 'Indeterminate'])
print(data)
masked_array(data=[--, --, --, ..., 7.670499801635742,
7.689199924468994, 7.689199924468994],
mask=[ True, True, True, ..., False, False, False],
fill_value=1e+20, dtype=float32)
"""
qc_var_name = self._obj.qcfilter.check_for_ancillary_qc(
var_name, add_if_missing=False)
flag_value = False
flag_values = None
flag_masks = None
flag_assessments = None
try:
flag_assessments = self._obj[qc_var_name].attrs['flag_assessments']
flag_masks = self._obj[qc_var_name].attrs['flag_masks']
except KeyError:
pass
try:
flag_values = self._obj[qc_var_name].attrs['flag_values']
flag_value = True
except KeyError:
pass
test_numbers = []
if rm_tests is not None:
if isinstance(rm_tests, (int, float, str)):
rm_tests = [int(rm_tests)]
test_numbers.extend(rm_tests)
if rm_assessments is not None:
if isinstance(rm_assessments, str):
rm_assessments = [rm_assessments]
if flag_masks is not None:
test_nums = [parse_bit(mask)[0] for mask in flag_masks]
if flag_values is not None:
test_nums = flag_values
rm_assessments = [x.lower() for x in rm_assessments]
if flag_assessments is not None:
for ii, assessment in enumerate(flag_assessments):
if assessment.lower() in rm_assessments:
test_numbers.append(test_nums[ii])
# Make the list of test numbers to mask unique
test_numbers = list(set(test_numbers))
# Create mask of indexes by looking where each test is set
variable = self._obj[var_name].values
nan_dtype = np.float32
if variable.dtype in (np.float64, np.int64):
nan_dtype = np.float64
mask = np.zeros(variable.shape, dtype=bool)
for test in test_numbers:
mask = mask | self._obj.qcfilter.get_qc_test_mask(
var_name, test, flag_value=flag_value)
# Convert data numpy array into masked array
try:
variable = np.ma.array(variable, mask=mask,
fill_value=ma_fill_value)
except TypeError:
variable = np.ma.array(variable, mask=mask,
fill_value=ma_fill_value,
dtype=np.array(ma_fill_value).dtype)
# If requested switch array from where data is not failing tests
# to where data is failing tests. This can be used when overplotting
# where the data is failing the tests.
if return_inverse:
mask = variable.mask
mask = np.invert(mask)
variable.mask = mask
# If asked to return numpy array with values set to NaN
if return_nan_array:
variable = variable.astype(nan_dtype)
variable = variable.filled(fill_value=np.nan)
return variable
def datafilter(self, variables=None, rm_assessments=None, rm_tests=None,
np_ma=True, verbose=False, del_qc_var=True):
"""
Method to apply quality control variables to data variables by
changing the data values in the dataset using quality control variables.
The data variable is changed to a numpy masked array with failing
data masked or, if requested, to numpy array with failing data set to
NaN. This can be used to update the data variable in the xarray
dataset for use with xarray methods to perform analysis on the data
since those methods don't read the quality control variables.
Parameters
----------
variables : None or str or list of str
Data variable names to process
rm_assessments : str or list of str
Assessment names listed under quality control variable flag_assessments
to exclude from returned data. Examples include
['Bad', 'Incorrect', 'Indeterminate', 'Suspect']
rm_tests : int or list of int
Test numbers listed under quality control variable to exclude from
returned data. This is the test
number (or bit position number) not the mask number.
np_ma : boolean
Should the data in the xarray DataArray be set to numpy masked
arrays. This should work with most xarray methods. If the xarray
processing method does not work with numpy masked array set to
False to use NaN.
verbose : boolean
Print processing information.
del_qc_var : boolean
Option to delete the quality control variable after processing. Since
the data values can not be determined after they are set to NaN
and xarray method processing would also process the quality control
variables, the default is to remove the quality control data
variables. If numpy masked arrays are used the data are not lost
but would need to be extracted and set to DataArray to return the
dataset back to original state.
Examples
--------
.. code-block:: python
from act.io.armfiles import read_netcdf
from act.tests import EXAMPLE_MET1
ds = read_netcdf(EXAMPLE_MET1)
ds.clean.cleanup()
var_name = 'atmos_pressure'
ds_1 = ds.mean()
ds.qcfilter.add_less_test(var_name, 99, test_assessment='Bad')
ds.qcfilter.datafilter(rm_assessments='Bad')
ds_2 = ds.mean()
print(f'All data: {ds_1[var_name].values}, Bad Removed: {ds_2[var_name].values}')
All data: 98.86097717285156, Bad Removed: 99.15148162841797
"""
if variables is not None and isinstance(variables, str):
variables = [variables]
if variables is None:
variables = list(self._obj.data_vars)
for var_name in variables:
qc_var_name = self.check_for_ancillary_qc(var_name,
add_if_missing=False,
cleanup=False)
if qc_var_name is None:
if verbose:
print(f'No quality control variable for {var_name} found '
f'in call to .qcfilter.datafilter()')
continue
data = self.get_masked_data(var_name, rm_assessments=rm_assessments,
rm_tests=rm_tests, ma_fill_value=np_ma)
self._obj[var_name].values = data
if del_qc_var:
del self._obj[qc_var_name]
if verbose:
print(f'Deleting {qc_var_name} from dataset')
def set_bit(array, bit_number):
"""
Function to set a quality control bit given a scalar or
array of values and a bit number.
Parameters
----------
array : int or numpy array
The bitpacked array to set the bit number.
bit_number : int
The bit (or test) number to set.
Returns
-------
array : int, numpy array, tuple, list
Integer or numpy array with bit set for each element of the array.
Returned in same type.
Examples
--------
Example use setting bit 2 to an array called data:
.. code-block:: python
data = np.array(range(0, 7))
data = set_bit(data, 2)
print(data)
array([2, 3, 2, 3, 6, 7, 6])
"""
was_list = False
was_tuple = False
if isinstance(array, list):
array = np.array(array)
was_list = True
if isinstance(array, tuple):
array = np.array(array)
was_tuple = True
if bit_number > 0:
array |= (1 << bit_number - 1)
if was_list:
array = list(array)
if was_tuple:
array = tuple(array)
return array
def unset_bit(array, bit_number):
"""
Function to remove a quality control bit given a
scalar or array of values and a bit number.
Parameters
----------
array : int or numpy array
Array of integers containing bit packed numbers.
bit_number : int
Bit number to remove.
Returns
-------
array : int or numpy array
Returns same data type as array entered with bit removed. Will
fail gracefully if the bit requested to be removed was not set.
Examples
--------
Example use removing bit 2 from an array called data:
> data = set_bit(0,2)
> data = set_bit(data,3)
> data
6
> data = unset_bit(data,2)
> data
4
"""
was_list = False
was_tuple = False
if isinstance(array, list):
array = np.array(array)
was_list = True
if isinstance(array, tuple):
array = np.array(array)
was_tuple = True
if bit_number > 0:
array = array & ~ (1 << bit_number - 1)
if was_list:
array = list(array)
if was_tuple:
array = tuple(array)
return array
def parse_bit(qc_bit):
"""
Given a single integer value, return bit positions.
Parameters
----------
qc_bit : int or numpy int
Bit packed integer number to be parsed.
Returns
-------
bit_number : numpy.int32 array
Array containing all bit numbers of the bit packed number.
If no bits set returns empty array.
Examples
--------
> parse_bit(7)
array([1, 2, 3])
"""
if isinstance(qc_bit, (list, tuple, np.ndarray)):
if len(qc_bit) > 1:
raise ValueError("Must be a single value.")
qc_bit = qc_bit[0]
if qc_bit < 0:
raise ValueError("Must be a positive integer.")
bit_number = []
# if qc_bit == 0:
# bit_number.append(0)
counter = 0
while qc_bit > 0:
temp_value = qc_bit % 2
qc_bit = qc_bit >> 1
counter += 1
if temp_value == 1:
bit_number.append(counter)
bit_number = np.asarray(bit_number, dtype=np.int32)
return bit_number
<|code_end|>
|
Add Google Analytics ID
Add a Google Analytics ID to the `conf.py` file used by sphinx. For those interested in having access to the analytics, you will need to send over your Gmail address.
Fixes #396
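A minimal sketch of the intended conf.py change, assuming the sphinx_rtd_theme in use accepts a 'google_analytics_id' theme option; the tracking ID shown is a placeholder, not the real one:

# docs/source/conf.py (sketch only)
html_theme = 'sphinx_rtd_theme'
html_theme_options = {
    # Placeholder ID; the real value comes from the Google Analytics account
    'google_analytics_id': 'UA-XXXXXXXXX-X',
}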
|
docs/source/conf.py
<|code_start|>#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Atmospheric data Community Toolkit documentation build configuration file, created by
# sphinx-quickstart on Thu Jun 28 12:35:56 2018.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.githubpages',
'sphinx.ext.intersphinx',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'IPython.sphinxext.ipython_directive',
'IPython.sphinxext.ipython_console_highlighting',
'matplotlib.sphinxext.plot_directive',
'sphinx_copybutton',
'sphinx_gallery.gen_gallery',
'sphinx.ext.napoleon',
]
exclude_patterns = ['_build', '**.ipynb_checkpoints']
sphinx_gallery_conf = {
'examples_dirs': '../../examples',
'gallery_dirs': 'source/auto_examples'
}
# Configuration options for plot_directive. See:
# https://github.com/matplotlib/matplotlib/blob/f3ed922d935751e08494e5fb5311d3050a3b637b/lib/matplotlib/sphinxext/plot_directive.py#L81
plot_html_show_source_link = False
plot_html_show_formats = False
# Generate the API documentation when building
autoclass_content = "both"
autosummary_generate = True
autosummary_imported_members = True
# Otherwise, the Return parameter list looks different from the Parameter list
napoleon_use_rtype = False
napoleon_use_ivar = True
napoleon_include_init_with_doc = False
napoleon_use_param = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Atmospheric data Community Toolkit'
copyright = '2018, ACT Developers'
author = 'ACT Developers'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
import act
# The short X.Y version.
version = act.__version__
# The full version, including alpha/beta/rc tags.
release = act.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
import sphinx_rtd_theme
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'act'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'act.tex', 'Atmospheric data Community Toolkit Documentation',
'Contributors', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'act', 'Atmospheric data Community Toolkit Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'act', 'Atmospheric data Community Toolkit Documentation',
author, 'act', 'Package for connecting users to the data',
'Miscellaneous'),
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('https://docs.python.org/3/', None),
'numpy': ('https://docs.scipy.org/doc/numpy/', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference/', None),
'pandas': ('https://pandas.pydata.org/pandas-docs/stable', None),
'matplotlib': ('https://matplotlib.org', None),
}
<|code_end|>
|
docs/source/conf.py
<|code_start|>#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Atmospheric data Community Toolkit documentation build configuration file, created by
# sphinx-quickstart on Thu Jun 28 12:35:56 2018.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.githubpages',
'sphinx.ext.intersphinx',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'IPython.sphinxext.ipython_directive',
'IPython.sphinxext.ipython_console_highlighting',
'matplotlib.sphinxext.plot_directive',
'sphinx_copybutton',
'sphinx_gallery.gen_gallery',
'sphinx.ext.napoleon',
]
exclude_patterns = ['_build', '**.ipynb_checkpoints']
sphinx_gallery_conf = {
'examples_dirs': '../../examples',
'gallery_dirs': 'source/auto_examples'
}
# Configuration options for plot_directive. See:
# https://github.com/matplotlib/matplotlib/blob/f3ed922d935751e08494e5fb5311d3050a3b637b/lib/matplotlib/sphinxext/plot_directive.py#L81
plot_html_show_source_link = False
plot_html_show_formats = False
# Generate the API documentation when building
autoclass_content = "both"
autosummary_generate = True
autosummary_imported_members = True
# Otherwise, the Return parameter list looks different from the Parameter list
napoleon_use_rtype = False
napoleon_use_ivar = True
napoleon_include_init_with_doc = False
napoleon_use_param = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Atmospheric data Community Toolkit'
copyright = '2018, ACT Developers'
author = 'ACT Developers'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
import act
# The short X.Y version.
version = act.__version__
# The full version, including alpha/beta/rc tags.
release = act.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
import sphinx_rtd_theme
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
'google_analytics_id': 'UA-179020619-3',
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'act'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'act.tex', 'Atmospheric data Community Toolkit Documentation',
'Contributors', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'act', 'Atmospheric data Community Toolkit Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'act', 'Atmospheric data Community Toolkit Documentation',
author, 'act', 'Package for connecting users to the data',
'Miscellaneous'),
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('https://docs.python.org/3/', None),
'numpy': ('https://docs.scipy.org/doc/numpy/', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference/', None),
'pandas': ('https://pandas.pydata.org/pandas-docs/stable', None),
'matplotlib': ('https://matplotlib.org', None),
}
<|code_end|>
|
QC bitwise comparison of float values
Some xarray methods may cast the QC variables from type int to type float. For example, .resample() will convert all variables to float type. This causes issues when performing bitwise comparisons of QC variables, in particular
in the function get_qc_test_mask():
check_bit = set_bit(0, test_number) & qc_variable
The variable type needs to be checked and the array cast back to int before performing the bitwise comparison.
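A minimal sketch of the kind of guard intended, assuming the cast happens inside get_qc_test_mask() just before the comparison (names follow the snippet above; the exact placement is up to the fix):

import numpy as np

# qc_variable is the values array of the ancillary QC variable, e.g.
# ds_object[qc_var_name].values after a .resample() call.
if np.issubdtype(qc_variable.dtype, np.floating):
    # Bitwise AND is only defined for integer arrays, so cast back to int.
    qc_variable = qc_variable.astype(np.int64)
check_bit = set_bit(0, test_number) & qc_variable
tripped = np.where(check_bit > 0)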
|
act/qc/qcfilter.py
<|code_start|>"""
Functions and methods for creating ancillary quality control variables
and filters (masks) which can be used with various corrections
routines in ACT.
"""
import numpy as np
import xarray as xr
import dask
from act.qc import qctests, comparison_tests
@xr.register_dataset_accessor('qcfilter')
class QCFilter(qctests.QCTests, comparison_tests.QCTests, object):
"""
A class for building quality control variables containing arrays for
filtering data based on a set of test conditions typically based on the
values in the data fields. These filters can be used in various
algorithms and calculations within ACT.
"""
def __init__(self, xarray_obj):
""" initialize """
self._obj = xarray_obj
def check_for_ancillary_qc(self, var_name, add_if_missing=True,
cleanup=False, flag_type=False):
"""
Method to check if a quality control variable exist in the dataset
and return the quality control variable name.
Will call create_qc_variable() to make variable if does not exist
and update_ancillary_variable() to ensure linkage between data and
quality control variable. Can also be used just to get the
corresponding quality control variable name with adding if
it is missing.
Parameters
----------
var_name : str
Data variable name.
add_if_missing : boolean
Add quality control variable if missing from object.
cleanup : boolean
Option to run qc.clean.cleanup() method on the object
to ensure the object was updated from ARM QC to the
correct standardized QC.
flag_type : boolean
Indicating the QC variable uses flag_values instead of
flag_masks.
Returns
-------
qc_var_name : str or None
Name of existing or new quality control variable. Returns
None if no existing quality control variable is found and
add_if_missing is set to False.
"""
qc_var_name = None
try:
ancillary_variables = \
self._obj[var_name].attrs['ancillary_variables']
if isinstance(ancillary_variables, str):
ancillary_variables = ancillary_variables.split()
for var in ancillary_variables:
for attr, value in self._obj[var].attrs.items():
if attr == 'standard_name' and 'quality_flag' in value:
qc_var_name = var
if add_if_missing and qc_var_name is None:
qc_var_name = self._obj.qcfilter.create_qc_variable(
var_name, flag_type=flag_type)
except KeyError:
# Since no ancillary_variables exist look for ARM style of QC
# variable name. If it exists use it else create new
# QC variable.
if add_if_missing:
try:
self._obj['qc_' + var_name]
qc_var_name = 'qc_' + var_name
except KeyError:
qc_var_name = self._obj.qcfilter.create_qc_variable(
var_name, flag_type=flag_type)
# Make sure data variable has a variable attribute linking
# data variable to QC variable.
if add_if_missing:
self._obj.qcfilter.update_ancillary_variable(var_name, qc_var_name)
# Clean up quality control variables to the required standard in the
# xarray object.
if cleanup:
self._obj.clean.cleanup(handle_missing_value=True,
link_qc_variables=False)
return qc_var_name
def create_qc_variable(self, var_name, flag_type=False,
flag_values_set_value=0,
qc_var_name=None):
"""
Method to create a quality control variable in the dataset.
Will try not to destroy the qc variable by appending numbers
to the variable name if needed.
Parameters
----------
var_name : str
Data variable name.
flag_type : boolean
If an integer flag type should be created instead of
bitpacked mask type. Will create flag_values instead of
flag_masks.
flag_values_set_value : int
Initial flag value to use when initializing array.
qc_var_name : str
Optional new quality control variable name. If not set
will create one using \\"qc\\_\\" prepended to the data
variable name. If the name given or created is taken
will append a number that does not have a conflict.
Returns
-------
qc_var_name : str
Name of new quality control variable created.
"""
# Make QC variable long name. The variable long_name attribute
# may not exist so catch that error and set to default.
try:
qc_variable_long_name = ('Quality check results on field: ' +
self._obj[var_name].attrs['long_name'])
except KeyError:
qc_variable_long_name = 'Quality check results for ' + var_name
# Make a new quality control variable name. Check if exists in the
# dataset. If so loop through creation of new name until one is
# found that will not replace existing variable.
if qc_var_name is None:
qc_var_name = 'qc_' + var_name
variable_names = list(self._obj.data_vars)
if qc_var_name in variable_names:
for ii in range(1, 100):
temp_qc_var_name = '_'.join([qc_var_name, str(ii)])
if temp_qc_var_name not in variable_names:
qc_var_name = temp_qc_var_name
break
# Create the QC variable filled with 0 values matching the
# shape of data variable.
try:
qc_data = dask.array.from_array(
np.zeros_like(self._obj[var_name].values, dtype=np.int32),
chunks=self._obj[var_name].data.chunksize)
except AttributeError:
qc_data = np.zeros_like(self._obj[var_name].values, dtype=np.int32)
# Updating to use coords instead of dim, which caused a loss of
# attributes as noted in Issue 347
self._obj[qc_var_name] = xr.DataArray(
data=qc_data, coords=self._obj[var_name].coords,
attrs={"long_name": qc_variable_long_name,
"units": '1'}
)
# Update if using flag_values and don't want 0 to be default value.
if flag_type and flag_values_set_value != 0:
self._obj[qc_var_name].values = \
self._obj[qc_var_name].values + int(flag_values_set_value)
# Add required variable attributes.
if flag_type:
self._obj[qc_var_name].attrs['flag_values'] = []
else:
self._obj[qc_var_name].attrs['flag_masks'] = []
self._obj[qc_var_name].attrs['flag_meanings'] = []
self._obj[qc_var_name].attrs['flag_assessments'] = []
self._obj[qc_var_name].attrs['standard_name'] = 'quality_flag'
self.update_ancillary_variable(var_name, qc_var_name=qc_var_name)
return qc_var_name
def update_ancillary_variable(self, var_name, qc_var_name=None):
"""
Method to check if ancillary_variables variable attribute
is set with quality control variable name.
Parameters
----------
var_name : str
Data variable name.
qc_var_name : str
quality control variable name. If not given will attempt
to get the name from data variable ancillary_variables
attribute.
"""
if qc_var_name is None:
qc_var_name = self._obj.qcfilter.check_for_ancillary_qc(
var_name, add_if_missing=False)
if qc_var_name is None:
return
try:
ancillary_variables = \
self._obj[var_name].attrs['ancillary_variables']
if qc_var_name not in ancillary_variables:
ancillary_variables = ' '.join([ancillary_variables,
qc_var_name])
except KeyError:
ancillary_variables = qc_var_name
self._obj[var_name].attrs['ancillary_variables'] = ancillary_variables
def add_test(self, var_name, index=None, test_number=None,
test_meaning=None, test_assessment='Bad',
flag_value=False, recycle=False):
"""
Method to add a new test/filter to a quality control variable.
Parameters
----------
var_name : str
data variable name
index : int, bool, list of int or bool, numpy array, tuple of numpy arrays
Indexes into quality control array to set the test bit.
If not set or set to None will not set the test on any
element of the quality control variable but will still
add the test to the flag_masks, flag_meanings and
flag_assessments attributes.
test_number : int
Test number to use. If keyword is not set will use first
available test bit/test number.
recycle : boolean
Option to use a number less than the next highest test if available. For example,
if tests 1, 2, 4, 5 are set and recycle is True, the next test chosen will be 3,
else it will be 6.
test_meaning : str
String describing the test. Will be added to flag_meanings
variable attribute.
test_assessment : str
String describing the test assessment. If not set will use
"Bad" as the string to append to flag_assessments. Will
update to be lower case and then capitalized.
flag_value : boolean
Switch to use flag_values integer quality control.
Returns
-------
test_dict : dict
A dictionary containing information added to the QC
variable.
Examples
--------
> result = ds_object.qcfilter.add_test(
var_name, test_meaning='Birds!')
"""
test_dict = {}
if test_meaning is None:
raise ValueError('You need to provide a value for test_meaning '
'keyword when calling the add_test method')
# This ensures the indexing will work even if given float values.
# Preserves tuples from np.where() or boolean arrays for standard
# python indexing.
if index is not None and not isinstance(index, (np.ndarray, tuple)):
index = np.array(index)
if index.dtype.kind == 'f':
index = index.astype(int)
# Ensure assessment is lowercase and capitalized to be consistent
test_assessment = test_assessment.lower().capitalize()
qc_var_name = self._obj.qcfilter.check_for_ancillary_qc(
var_name, flag_type=flag_value)
if test_number is None:
test_number = self._obj.qcfilter.available_bit(
qc_var_name, recycle=recycle)
self._obj.qcfilter.set_test(var_name, index, test_number, flag_value)
if flag_value:
try:
self._obj[qc_var_name].attrs['flag_values'].append(test_number)
except KeyError:
self._obj[qc_var_name].attrs['flag_values'] = [test_number]
else:
# Determine if flag_masks test number is too large for current data type.
# If so up convert data type.
flag_masks = np.array(self._obj[qc_var_name].attrs['flag_masks'])
mask_dtype = flag_masks.dtype
if not np.issubdtype(mask_dtype, np.integer):
mask_dtype = np.uint32
if np.iinfo(mask_dtype).max - set_bit(0, test_number) <= -1:
if mask_dtype == np.int8 or mask_dtype == np.uint8:
mask_dtype = np.uint16
elif mask_dtype == np.int16 or mask_dtype == np.uint16:
mask_dtype = np.uint32
elif mask_dtype == np.int32 or mask_dtype == np.uint32:
mask_dtype = np.uint64
flag_masks = flag_masks.astype(mask_dtype)
flag_masks = np.append(flag_masks, np.array(set_bit(0, test_number), dtype=mask_dtype))
self._obj[qc_var_name].attrs['flag_masks'] = list(flag_masks)
try:
self._obj[qc_var_name].attrs['flag_meanings'].append(test_meaning)
except KeyError:
self._obj[qc_var_name].attrs['flag_meanings'] = [test_meaning]
try:
self._obj[qc_var_name].attrs['flag_assessments'].append(test_assessment)
except KeyError:
self._obj[qc_var_name].attrs['flag_assessments'] = [test_assessment]
test_dict['test_number'] = test_number
test_dict['test_meaning'] = test_meaning
test_dict['test_assessment'] = test_assessment
test_dict['qc_variable_name'] = qc_var_name
test_dict['variable_name'] = var_name
return test_dict
def remove_test(self, var_name=None, test_number=None, qc_var_name=None, flag_value=False,
flag_values_reset_value=0):
"""
Method to remove a test/filter from a quality control variable. Must set
var_name or qc_var_name.
Parameters
----------
var_name : str or None
Data variable name.
test_number : int
Test number to remove.
qc_var_name : str or None
Quality control variable name. Ignored if var_name is set.
flag_value : boolean
Switch to use flag_values integer quality control.
flag_values_reset_value : int
Value to use when resetting a flag_values value to not be set.
Examples
--------
> ds_object.qcfilter.remove_test(
var_name, test_number=3)
"""
if test_number is None:
raise ValueError('You need to provide a value for test_number '
'keyword when calling the remove_test() method')
if var_name is None and qc_var_name is None:
raise ValueError('You need to provide a value for var_name or qc_var_name '
'keyword when calling the remove_test() method')
if var_name is not None:
qc_var_name = self._obj.qcfilter.check_for_ancillary_qc(var_name)
# Determine which index is using the test number
index = None
if flag_value:
flag_values = self._obj[qc_var_name].attrs['flag_values']
for ii, flag_num in enumerate(flag_values):
if flag_num == test_number:
index = ii
break
else:
flag_masks = self._obj[qc_var_name].attrs['flag_masks']
for ii, bit_num in enumerate(flag_masks):
if parse_bit(bit_num)[0] == test_number:
index = ii
break
# If can't find the index of test return before doing anything.
if index is None:
return
if flag_value:
remove_index = self._obj.qcfilter.get_qc_test_mask(
var_name=var_name, qc_var_name=qc_var_name, test_number=test_number,
return_index=True, flag_value=True)
self._obj.qcfilter.unset_test(var_name=var_name, qc_var_name=qc_var_name,
index=remove_index, test_number=test_number,
flag_value=flag_value,
flag_values_reset_value=flag_values_reset_value)
del flag_values[index]
self._obj[qc_var_name].attrs['flag_values'] = flag_values
else:
remove_index = self._obj.qcfilter.get_qc_test_mask(
var_name=var_name, qc_var_name=qc_var_name, test_number=test_number,
return_index=True)
self._obj.qcfilter.unset_test(var_name=var_name, qc_var_name=qc_var_name,
index=remove_index, test_number=test_number,
flag_value=flag_value)
del flag_masks[index]
self._obj[qc_var_name].attrs['flag_masks'] = flag_masks
flag_meanings = self._obj[qc_var_name].attrs['flag_meanings']
del flag_meanings[index]
self._obj[qc_var_name].attrs['flag_meanings'] = flag_meanings
flag_assessments = self._obj[qc_var_name].attrs['flag_assessments']
del flag_assessments[index]
self._obj[qc_var_name].attrs['flag_assessments'] = flag_assessments
def set_test(self, var_name, index=None, test_number=None,
flag_value=False):
"""
Method to set a test/filter in a quality control variable.
Parameters
----------
var_name : str
Data variable name.
index : int or list or numpy array
Index to set the test in the quality control array. To set the test
on all values, pass in the index of all values.
test_number : int
Test number to set.
flag_value : boolean
Switch to use flag_values integer quality control.
Examples
--------
.. code-block:: python
index = [0, 1, 2, 30]
ds_object.qcfilter.set_test(
var_name, index=index, test_number=2)
"""
qc_var_name = self._obj.qcfilter.check_for_ancillary_qc(var_name)
qc_variable = np.array(self._obj[qc_var_name].values)
# Determine if test number is too large for current data type. If so
# up convert data type.
dtype = qc_variable.dtype
if np.iinfo(dtype).max - set_bit(0, test_number) < -1:
if dtype == np.int8:
dtype = np.int16
elif dtype == np.int16:
dtype = np.int32
elif dtype == np.int32:
dtype = np.int64
qc_variable = qc_variable.astype(dtype)
if index is not None:
if flag_value:
qc_variable[index] = test_number
else:
qc_variable[index] = set_bit(qc_variable[index], test_number)
self._obj[qc_var_name].values = qc_variable
def unset_test(self, var_name=None, qc_var_name=None, index=None, test_number=None,
flag_value=False, flag_values_reset_value=0):
"""
Method to unset a test/filter from a quality control variable.
Parameters
----------
var_name : str or None
Data variable name.
qc_var_name : str or None
Quality control variable name. Ignored if var_name is set.
index : int or list or numpy array
Index to unset the test in the quality control array. To unset the test
on all values, pass in the index of all values.
test_number : int
Test number to remove.
flag_value : boolean
Switch to use flag_values integer quality control.
flag_values_reset_value : int
Value to use when resetting a flag_values value to not be set.
Examples
--------
.. code-block:: python
ds_object.qcfilter.unset_test(
var_name, index=0, test_number=2)
"""
if index is None:
return
if var_name is None and qc_var_name is None:
raise ValueError('You need to provide a value for var_name or qc_var_name '
'keyword when calling the unset_test() method')
if var_name is not None:
qc_var_name = self._obj.qcfilter.check_for_ancillary_qc(var_name)
qc_variable = self._obj[qc_var_name].values
if flag_value:
qc_variable[index] = flag_values_reset_value
else:
qc_variable[index] = unset_bit(qc_variable[index], test_number)
self._obj[qc_var_name].values = qc_variable
def available_bit(self, qc_var_name, recycle=False):
"""
Method to determine next available bit or flag to use with a QC test.
Will check for flag_masks first and if not found will check for
flag_values. This will drive how the next value is chosen.
Parameters
----------
qc_var_name : str
Quality control variable name.
recycle : boolean
Option to look for a bit (test) not in use starting from 1.
If a test is not defined will return the lowest number, else
will just use next highest number.
Returns
-------
test_num : int
Next available test number.
"""
try:
flag_masks = self._obj[qc_var_name].attrs['flag_masks']
flag_value = False
except KeyError:
try:
flag_masks = self._obj[qc_var_name].attrs['flag_values']
flag_value = True
except KeyError:
try:
self._obj[qc_var_name].attrs['flag_values']
flag_masks = self._obj[qc_var_name].attrs['flag_masks']
flag_value = False
except KeyError:
raise ValueError('Problem getting next value from '
'available_bit(). flag_values and '
'flag_masks not set as expected')
if flag_masks == []:
next_bit = 1
else:
if flag_value:
if recycle:
next_bit = min(set(range(1, 100000)) - set(flag_masks))
else:
next_bit = max(flag_masks) + 1
else:
if recycle:
tests = [parse_bit(mask)[0] for mask in flag_masks]
next_bit = min(set(range(1, 63)) - set(tests))
else:
next_bit = parse_bit(max(flag_masks))[0] + 1
return int(next_bit)
def get_qc_test_mask(self, var_name=None, test_number=None, qc_var_name=None,
flag_value=False, return_index=False):
"""
Returns a numpy array of False or True where a particular
flag or bit is set in a numpy array. Must set var_name or qc_var_name
when calling.
Parameters
----------
var_name : str or None
Data variable name.
test_number : int
Test number to return array where test is set.
qc_var_name : str or None
Quality control variable name. Ignored if var_name is set.
flag_value : boolean
Switch to use flag_values integer quality control.
return_index : boolean
Return a numpy array of index numbers into QC array where the
test is set instead of 0 or 1 mask.
Returns
-------
test_mask : bool array
A numpy boolean array with False or True where the test number or
bit was set.
Examples
--------
.. code-block:: python
from act.io.armfiles import read_netcdf
from act.tests import EXAMPLE_IRT25m20s
ds_object = read_netcdf(EXAMPLE_IRT25m20s)
var_name = 'inst_up_long_dome_resist'
result = ds_object.qcfilter.add_test(
var_name, index=[0, 1, 2], test_meaning='Birds!')
qc_var_name = result['qc_variable_name']
mask = ds_object.qcfilter.get_qc_test_mask(
var_name, result['test_number'], return_index=True)
print(mask)
array([0, 1, 2])
mask = ds_object.qcfilter.get_qc_test_mask(
var_name, result['test_number'])
print(mask)
array([ True, True, True, ..., False, False, False])
data = ds_object[var_name].values
print(data[mask])
array([7.84 , 7.8777, 7.8965], dtype=float32)
import numpy as np
data[mask] = np.nan
print(data)
array([ nan, nan, nan, ..., 7.6705, 7.6892, 7.6892],
dtype=float32)
"""
if var_name is None and qc_var_name is None:
raise ValueError('You need to provide a value for var_name or qc_var_name '
'keyword when calling the get_qc_test_mask() method')
if test_number is None:
raise ValueError('You need to provide a value for test_number '
'keyword when calling the get_qc_test_mask() method')
if var_name is not None:
qc_var_name = self._obj.qcfilter.check_for_ancillary_qc(var_name)
qc_variable = self._obj[qc_var_name].values
if flag_value:
tripped = np.where(qc_variable == test_number)
else:
check_bit = set_bit(0, test_number) & qc_variable
tripped = np.where(check_bit > 0)
test_mask = np.zeros(qc_variable.shape, dtype='int')
# Make sure test_mask is an array. If qc_variable is scalar will
# be returned from np.zeros as scalar.
test_mask = np.atleast_1d(test_mask)
test_mask[tripped] = 1
test_mask = np.ma.make_mask(test_mask, shrink=False)
if return_index:
test_mask = np.where(test_mask)[0]
return test_mask
def get_masked_data(self, var_name, rm_assessments=None,
rm_tests=None, return_nan_array=False,
ma_fill_value=None, return_inverse=False):
"""
Returns a numpy masked array containing data and mask or
a numpy float array with masked values set to NaN.
Parameters
----------
var_name : str
Data variable name.
rm_assessments : str or list of str
Assessment name to exclude from returned data.
rm_tests : int or list of int
Test numbers to exclude from returned data. This is the test
number (or bit position number) not the mask number.
return_nan_array : boolean
Return a numpy array with filtered (or masked) values
set to numpy NaN value. If the data is type int will upconvert
to numpy float to allow setting NaN value.
ma_fill_value : int or float (or str?)
The numpy masked array fill_value used in creation of the
masked array. If the datatype needs to be upconverted to allow
the fill value to be used, data will be upconverted.
return_inverse : boolean
Invert the masked array mask, or return a data array where the mask is
set to False instead of True, so the values failing the tests are the
ones left unmasked. Useful for overplotting the failing data.
Returns
-------
variable : numpy masked array or numpy float array
Default is to return a numpy masked array with the mask set to
True where the test with requested assessment or test number
was found set.
If return_nan_array is True will return numpy array upconverted
to float with locations where the test with requested assessment
or test number was found set converted to NaN.
Examples
--------
.. code-block:: python
from act.io.armfiles import read_netcdf
from act.tests import EXAMPLE_IRT25m20s
ds_object = read_netcdf(EXAMPLE_IRT25m20s)
var_name = 'inst_up_long_dome_resist'
result = ds_object.qcfilter.add_test(
var_name, index=[0, 1, 2], test_meaning='Birds!')
data = ds_object.qcfilter.get_masked_data(var_name,
rm_assessments=['Bad', 'Indeterminate'])
print(data)
masked_array(data=[--, --, --, ..., 7.670499801635742,
7.689199924468994, 7.689199924468994],
mask=[ True, True, True, ..., False, False, False],
fill_value=1e+20, dtype=float32)
"""
qc_var_name = self._obj.qcfilter.check_for_ancillary_qc(
var_name, add_if_missing=False)
flag_value = False
flag_values = None
flag_masks = None
flag_assessments = None
try:
flag_assessments = self._obj[qc_var_name].attrs['flag_assessments']
flag_masks = self._obj[qc_var_name].attrs['flag_masks']
except KeyError:
pass
try:
flag_values = self._obj[qc_var_name].attrs['flag_values']
flag_value = True
except KeyError:
pass
test_numbers = []
if rm_tests is not None:
if isinstance(rm_tests, (int, float, str)):
rm_tests = [int(rm_tests)]
test_numbers.extend(rm_tests)
if rm_assessments is not None:
if isinstance(rm_assessments, str):
rm_assessments = [rm_assessments]
if flag_masks is not None:
test_nums = [parse_bit(mask)[0] for mask in flag_masks]
if flag_values is not None:
test_nums = flag_values
rm_assessments = [x.lower() for x in rm_assessments]
if flag_assessments is not None:
for ii, assessment in enumerate(flag_assessments):
if assessment.lower() in rm_assessments:
test_numbers.append(test_nums[ii])
# Make the list of test numbers to mask unique
test_numbers = list(set(test_numbers))
# Create mask of indexes by looking where each test is set
variable = self._obj[var_name].values
nan_dtype = np.float32
if variable.dtype in (np.float64, np.int64):
nan_dtype = np.float64
mask = np.zeros(variable.shape, dtype=bool)
for test in test_numbers:
mask = mask | self._obj.qcfilter.get_qc_test_mask(
var_name, test, flag_value=flag_value)
# Convert data numpy array into masked array
try:
variable = np.ma.array(variable, mask=mask,
fill_value=ma_fill_value)
except TypeError:
variable = np.ma.array(variable, mask=mask,
fill_value=ma_fill_value,
dtype=np.array(ma_fill_value).dtype)
# If requested switch array from where data is not failing tests
# to where data is failing tests. This can be used when overplotting
# where the data is failing the tests.
if return_inverse:
mask = variable.mask
mask = np.invert(mask)
variable.mask = mask
# If asked to return numpy array with values set to NaN
if return_nan_array:
variable = variable.astype(nan_dtype)
variable = variable.filled(fill_value=np.nan)
return variable
def datafilter(self, variables=None, rm_assessments=None, rm_tests=None,
np_ma=True, verbose=False, del_qc_var=True):
"""
Method to apply quality control variables to data variables by
changing the data values in the dataset using quality control variables.
The data variable is changed to a numpy masked array with failing
data masked or, if requested, to numpy array with failing data set to
NaN. This can be used to update the data variable in the xarray
dataset for use with xarray methods to perform analysis on the data
since those methods don't read the quality control variables.
Parameters
----------
variables : None or str or list of str
Data variable names to process
rm_assessments : str or list of str
Assessment names listed under quality control variable flag_assessments
to exclude from returned data. Examples include
['Bad', 'Incorrect', 'Indeterminate', 'Suspect']
rm_tests : int or list of int
Test numbers listed under quality control variable to exclude from
returned data. This is the test
number (or bit position number) not the mask number.
np_ma : boolean
Should the data in the xarray DataArray be set to numpy masked
arrays. This should work with most xarray methods. If the xarray
processing method does not work with numpy masked array set to
False to use NaN.
verbose : boolean
Print processing information.
del_qc_var : boolean
Option to delete the quality control variable after processing. Since
the data values can not be determined after they are set to NaN
and xarray method processing would also process the quality control
variables, the default is to remove the quality control data
variables. If numpy masked arrays are used the data are not lost
but would need to be extracted and set to DataArray to return the
dataset back to original state.
Examples
--------
.. code-block:: python
from act.io.armfiles import read_netcdf
from act.tests import EXAMPLE_MET1
ds = read_netcdf(EXAMPLE_MET1)
ds.clean.cleanup()
var_name = 'atmos_pressure'
ds_1 = ds.mean()
ds.qcfilter.add_less_test(var_name, 99, test_assessment='Bad')
ds.qcfilter.datafilter(rm_assessments='Bad')
ds_2 = ds.mean()
print(f'All data: {ds_1[var_name].values}, Bad Removed: {ds_2[var_name].values}')
All data: 98.86097717285156, Bad Removed: 99.15148162841797
"""
if variables is not None and isinstance(variables, str):
variables = [variables]
if variables is None:
variables = list(self._obj.data_vars)
for var_name in variables:
qc_var_name = self.check_for_ancillary_qc(var_name,
add_if_missing=False,
cleanup=False)
if qc_var_name is None:
if verbose:
print(f'No quality control variable for {var_name} found '
f'in call to .qcfilter.datafilter()')
continue
data = self.get_masked_data(var_name, rm_assessments=rm_assessments,
rm_tests=rm_tests, ma_fill_value=np_ma)
self._obj[var_name].values = data
if del_qc_var:
del self._obj[qc_var_name]
if verbose:
print(f'Deleting {qc_var_name} from dataset')
def set_bit(array, bit_number):
"""
Function to set a quality control bit given a scalar or
array of values and a bit number.
Parameters
----------
array : int or numpy array
The bitpacked array to set the bit number.
bit_number : int
The bit (or test) number to set.
Returns
-------
array : int, numpy array, tuple, list
Integer or numpy array with bit set for each element of the array.
Returned in same type.
Examples
--------
Example use setting bit 2 to an array called data:
.. code-block:: python
data = np.array(range(0, 7))
data = set_bit(data, 2)
print(data)
array([2, 3, 2, 3, 6, 7, 6])
"""
was_list = False
was_tuple = False
if isinstance(array, list):
array = np.array(array)
was_list = True
if isinstance(array, tuple):
array = np.array(array)
was_tuple = True
if bit_number > 0:
array |= (1 << bit_number - 1)
if was_list:
array = list(array)
if was_tuple:
array = tuple(array)
return array
def unset_bit(array, bit_number):
"""
Function to remove a quality control bit given a
scalar or array of values and a bit number.
Parameters
----------
array : int or numpy array
Array of integers containing bit packed numbers.
bit_number : int
Bit number to remove.
Returns
-------
array : int or numpy array
Returns same data type as array entered with bit removed. Will
fail gracefully if the bit requested to be removed was not set.
Examples
--------
Example use removing bit 2 from an array called data:
> data = set_bit(0,2)
> data = set_bit(data,3)
> data
6
> data = unset_bit(data,2)
> data
4
"""
was_list = False
was_tuple = False
if isinstance(array, list):
array = np.array(array)
was_list = True
if isinstance(array, tuple):
array = np.array(array)
was_tuple = True
if bit_number > 0:
array = array & ~ (1 << bit_number - 1)
if was_list:
array = list(array)
if was_tuple:
array = tuple(array)
return array
def parse_bit(qc_bit):
"""
Given a single integer value, return bit positions.
Parameters
----------
qc_bit : int or numpy int
Bit packed integer number to be parsed.
Returns
-------
bit_number : numpy.int32 array
Array containing all bit numbers of the bit packed number.
If no bits set returns empty array.
Examples
--------
> parse_bit(7)
array([1, 2, 3])
"""
if isinstance(qc_bit, (list, tuple, np.ndarray)):
if len(qc_bit) > 1:
raise ValueError("Must be a single value.")
qc_bit = qc_bit[0]
if qc_bit < 0:
raise ValueError("Must be a positive integer.")
bit_number = []
qc_bit = int(qc_bit)
counter = 0
while qc_bit > 0:
temp_value = qc_bit % 2
qc_bit = qc_bit >> 1
counter += 1
if temp_value == 1:
bit_number.append(counter)
bit_number = np.asarray(bit_number, dtype=np.int32)
return bit_number
<|code_end|>
|
act/qc/qcfilter.py
<|code_start|>"""
Functions and methods for creating ancillary quality control variables
and filters (masks) which can be used with various corrections
routines in ACT.
"""
import numpy as np
import xarray as xr
import dask
from act.qc import qctests, comparison_tests
@xr.register_dataset_accessor('qcfilter')
class QCFilter(qctests.QCTests, comparison_tests.QCTests, object):
"""
A class for building quality control variables containing arrays for
filtering data based on a set of test condition typically based on the
values in the data fields. These filters can be used in various
algorithms and calculations within ACT.
"""
def __init__(self, xarray_obj):
""" initialize """
self._obj = xarray_obj
def check_for_ancillary_qc(self, var_name, add_if_missing=True,
cleanup=False, flag_type=False):
"""
Method to check if a quality control variable exist in the dataset
and return the quality control variable name.
Will call create_qc_variable() to make variable if does not exist
and update_ancillary_variable() to ensure linkage between data and
quality control variable. Can also be used just to get the
corresponding quality control variable name, optionally adding
it if it is missing.
Parameters
----------
var_name : str
Data variable name.
add_if_missing : boolean
Add quality control variable if missing from object.
cleanup : boolean
Option to run qc.clean.cleanup() method on the object
to ensure the object was updated from ARM QC to the
correct standardized QC.
flag_type : boolean
Indicating the QC variable uses flag_values instead of
flag_masks.
Returns
-------
qc_var_name : str or None
Name of existing or new quality control variable. Returns
None if no existing quality control variable is found and
add_if_missing is set to False.
"""
qc_var_name = None
try:
ancillary_variables = \
self._obj[var_name].attrs['ancillary_variables']
if isinstance(ancillary_variables, str):
ancillary_variables = ancillary_variables.split()
for var in ancillary_variables:
for attr, value in self._obj[var].attrs.items():
if attr == 'standard_name' and 'quality_flag' in value:
qc_var_name = var
if add_if_missing and qc_var_name is None:
qc_var_name = self._obj.qcfilter.create_qc_variable(
var_name, flag_type=flag_type)
except KeyError:
# Since no ancillary_variables exist look for ARM style of QC
# variable name. If it exists use it else create new
# QC variable.
if add_if_missing:
try:
self._obj['qc_' + var_name]
qc_var_name = 'qc_' + var_name
except KeyError:
qc_var_name = self._obj.qcfilter.create_qc_variable(
var_name, flag_type=flag_type)
# Make sure data variable has a variable attribute linking
# data variable to QC variable.
if add_if_missing:
self._obj.qcfilter.update_ancillary_variable(var_name, qc_var_name)
# Clean up quality control variables to the required standard in the
# xarray object.
if cleanup:
self._obj.clean.cleanup(handle_missing_value=True,
link_qc_variables=False)
return qc_var_name
def create_qc_variable(self, var_name, flag_type=False,
flag_values_set_value=0,
qc_var_name=None):
"""
Method to create a quality control variable in the dataset.
Will try not to destroy the qc variable by appending numbers
to the variable name if needed.
Parameters
----------
var_name : str
Data variable name.
flag_type : boolean
If an integer flag type should be created instead of
bitpacked mask type. Will create flag_values instead of
flag_masks.
flag_values_set_value : int
Initial flag value to use when initializing array.
qc_var_name : str
Optional new quality control variable name. If not set
will create one using \\"qc\\_\\" prepended to the data
variable name. If the name given or created is taken
will append a number that does not have a conflict.
Returns
-------
qc_var_name : str
Name of new quality control variable created.
"""
# Make QC variable long name. The variable long_name attribute
# may not exist so catch that error and set to default.
try:
qc_variable_long_name = ('Quality check results on field: ' +
self._obj[var_name].attrs['long_name'])
except KeyError:
qc_variable_long_name = 'Quality check results for ' + var_name
# Make a new quality control variable name. Check if exists in the
# dataset. If so loop through creation of new name until one is
# found that will not replace existing variable.
if qc_var_name is None:
qc_var_name = 'qc_' + var_name
variable_names = list(self._obj.data_vars)
if qc_var_name in variable_names:
for ii in range(1, 100):
temp_qc_var_name = '_'.join([qc_var_name, str(ii)])
if temp_qc_var_name not in variable_names:
qc_var_name = temp_qc_var_name
break
# Create the QC variable filled with 0 values matching the
# shape of data variable.
try:
qc_data = dask.array.from_array(
np.zeros_like(self._obj[var_name].values, dtype=np.int32),
chunks=self._obj[var_name].data.chunksize)
except AttributeError:
qc_data = np.zeros_like(self._obj[var_name].values, dtype=np.int32)
# Updating to use coords instead of dim, which caused a loss of
# attributes as noted in Issue 347
self._obj[qc_var_name] = xr.DataArray(
data=qc_data, coords=self._obj[var_name].coords,
attrs={"long_name": qc_variable_long_name,
"units": '1'}
)
# Update if using flag_values and don't want 0 to be default value.
if flag_type and flag_values_set_value != 0:
self._obj[qc_var_name].values = \
self._obj[qc_var_name].values + int(flag_values_set_value)
# Add required variable attributes.
if flag_type:
self._obj[qc_var_name].attrs['flag_values'] = []
else:
self._obj[qc_var_name].attrs['flag_masks'] = []
self._obj[qc_var_name].attrs['flag_meanings'] = []
self._obj[qc_var_name].attrs['flag_assessments'] = []
self._obj[qc_var_name].attrs['standard_name'] = 'quality_flag'
self.update_ancillary_variable(var_name, qc_var_name=qc_var_name)
return qc_var_name
def update_ancillary_variable(self, var_name, qc_var_name=None):
"""
Method to check if ancillary_variables variable attribute
is set with quality control variable name.
Parameters
----------
var_name : str
Data variable name.
qc_var_name : str
quality control variable name. If not given will attempt
to get the name from data variable ancillary_variables
attribute.
"""
if qc_var_name is None:
qc_var_name = self._obj.qcfilter.check_for_ancillary_qc(
var_name, add_if_missing=False)
if qc_var_name is None:
return
try:
ancillary_variables = \
self._obj[var_name].attrs['ancillary_variables']
if qc_var_name not in ancillary_variables:
ancillary_variables = ' '.join([ancillary_variables,
qc_var_name])
except KeyError:
ancillary_variables = qc_var_name
self._obj[var_name].attrs['ancillary_variables'] = ancillary_variables
def add_test(self, var_name, index=None, test_number=None,
test_meaning=None, test_assessment='Bad',
flag_value=False, recycle=False):
"""
Method to add a new test/filter to a quality control variable.
Parameters
----------
var_name : str
data variable name
index : int, bool, list of int or bool, numpy array, tuple of numpy arrays
Indexes into quality control array to set the test bit.
If not set or set to None will not set the test on any
element of the quality control variable but will still
add the test to the flag_masks, flag_meanings and
flag_assessments attributes.
test_number : int
Test number to use. If keyword is not set will use first
available test bit/test number.
recycle : boolean
Option to use number less than next highest test if available. For example
tests 1, 2, 4, 5 are set. Set to true the next test chosen will be 3, else
will be 6.
test_meaning : str
String describing the test. Will be added to flag_meanings
variable attribute.
test_assessment : str
String describing the test assessment. If not set will use
"Bad" as the string to append to flag_assessments. Will
update to be lower case and then capitalized.
flag_value : boolean
Switch to use flag_values integer quality control.
Returns
-------
test_dict : dict
A dictionary containing information added to the QC
variable.
Examples
--------
> result = ds_object.qcfilter.add_test(
var_name, test_meaning='Birds!')
"""
test_dict = {}
if test_meaning is None:
raise ValueError('You need to provide a value for test_meaning '
'keyword when calling the add_test method')
# This ensures the indexing will work even if given float values.
# Preserves tuples from np.where() or boolean arrays for standard
# python indexing.
if index is not None and not isinstance(index, (np.ndarray, tuple)):
index = np.array(index)
if index.dtype.kind not in np.typecodes["AllInteger"]:
index = index.astype(int)
# Ensure assessment is lowercase and capitalized to be consistent
test_assessment = test_assessment.lower().capitalize()
qc_var_name = self._obj.qcfilter.check_for_ancillary_qc(
var_name, flag_type=flag_value)
if test_number is None:
test_number = self._obj.qcfilter.available_bit(
qc_var_name, recycle=recycle)
self._obj.qcfilter.set_test(var_name, index, test_number, flag_value)
if flag_value:
try:
self._obj[qc_var_name].attrs['flag_values'].append(test_number)
except KeyError:
self._obj[qc_var_name].attrs['flag_values'] = [test_number]
else:
# Determine if flag_masks test number is too large for current data type.
# If so up convert data type.
flag_masks = np.array(self._obj[qc_var_name].attrs['flag_masks'])
mask_dtype = flag_masks.dtype
if not np.issubdtype(mask_dtype, np.integer):
mask_dtype = np.uint32
if np.iinfo(mask_dtype).max - set_bit(0, test_number) <= -1:
if mask_dtype == np.int8 or mask_dtype == np.uint8:
mask_dtype = np.uint16
elif mask_dtype == np.int16 or mask_dtype == np.uint16:
mask_dtype = np.uint32
elif mask_dtype == np.int32 or mask_dtype == np.uint32:
mask_dtype = np.uint64
flag_masks = flag_masks.astype(mask_dtype)
flag_masks = np.append(flag_masks, np.array(set_bit(0, test_number), dtype=mask_dtype))
self._obj[qc_var_name].attrs['flag_masks'] = list(flag_masks)
try:
self._obj[qc_var_name].attrs['flag_meanings'].append(test_meaning)
except KeyError:
self._obj[qc_var_name].attrs['flag_meanings'] = [test_meaning]
try:
self._obj[qc_var_name].attrs['flag_assessments'].append(test_assessment)
except KeyError:
self._obj[qc_var_name].attrs['flag_assessments'] = [test_assessment]
test_dict['test_number'] = test_number
test_dict['test_meaning'] = test_meaning
test_dict['test_assessment'] = test_assessment
test_dict['qc_variable_name'] = qc_var_name
test_dict['variable_name'] = var_name
return test_dict
def remove_test(self, var_name=None, test_number=None, qc_var_name=None, flag_value=False,
flag_values_reset_value=0):
"""
Method to remove a test/filter from a quality control variable. Must set
var_name or qc_var_name.
Parameters
----------
var_name : str or None
Data variable name.
test_number : int
Test number to remove.
qc_var_name : str or None
Quality control variable name. Ignored if var_name is set.
flag_value : boolean
Switch to use flag_values integer quality control.
flag_values_reset_value : int
Value to use when resetting a flag_values value to not be set.
Examples
--------
> ds_object.qcfilter.remove_test(
var_name, test_number=3)
"""
if test_number is None:
raise ValueError('You need to provide a value for test_number '
'keyword when calling the remove_test() method')
if var_name is None and qc_var_name is None:
raise ValueError('You need to provide a value for var_name or qc_var_name '
'keyword when calling the remove_test() method')
if var_name is not None:
qc_var_name = self._obj.qcfilter.check_for_ancillary_qc(var_name)
# Determine which index is using the test number
index = None
if flag_value:
flag_values = self._obj[qc_var_name].attrs['flag_values']
for ii, flag_num in enumerate(flag_values):
if flag_num == test_number:
index = ii
break
else:
flag_masks = self._obj[qc_var_name].attrs['flag_masks']
for ii, bit_num in enumerate(flag_masks):
if parse_bit(bit_num)[0] == test_number:
index = ii
break
# If can't find the index of test return before doing anything.
if index is None:
return
if flag_value:
remove_index = self._obj.qcfilter.get_qc_test_mask(
var_name=var_name, qc_var_name=qc_var_name, test_number=test_number,
return_index=True, flag_value=True)
self._obj.qcfilter.unset_test(var_name=var_name, qc_var_name=qc_var_name,
index=remove_index, test_number=test_number,
flag_value=flag_value,
flag_values_reset_value=flag_values_reset_value)
del flag_values[index]
self._obj[qc_var_name].attrs['flag_values'] = flag_values
else:
remove_index = self._obj.qcfilter.get_qc_test_mask(
var_name=var_name, qc_var_name=qc_var_name, test_number=test_number,
return_index=True)
self._obj.qcfilter.unset_test(var_name=var_name, qc_var_name=qc_var_name,
index=remove_index, test_number=test_number,
flag_value=flag_value)
del flag_masks[index]
self._obj[qc_var_name].attrs['flag_masks'] = flag_masks
flag_meanings = self._obj[qc_var_name].attrs['flag_meanings']
del flag_meanings[index]
self._obj[qc_var_name].attrs['flag_meanings'] = flag_meanings
flag_assessments = self._obj[qc_var_name].attrs['flag_assessments']
del flag_assessments[index]
self._obj[qc_var_name].attrs['flag_assessments'] = flag_assessments
def set_test(self, var_name, index=None, test_number=None,
flag_value=False):
"""
Method to set a test/filter in a quality control variable.
Parameters
----------
var_name : str
Data variable name.
index : int or list or numpy array
Index to set test in quality control array. If want to
unset all values will need to pass in index of all values.
test_number : int
Test number to set.
flag_value : boolean
Switch to use flag_values integer quality control.
Examples
--------
.. code-block:: python
index = [0, 1, 2, 30]
ds_object.qcfilter.set_test(
var_name, index=index, test_number=2)
"""
qc_var_name = self._obj.qcfilter.check_for_ancillary_qc(var_name)
qc_variable = np.array(self._obj[qc_var_name].values)
# Determine if test number is too large for current data type. If so
# up convert data type.
dtype = qc_variable.dtype
if np.iinfo(dtype).max - set_bit(0, test_number) < -1:
if dtype == np.int8:
dtype = np.int16
elif dtype == np.int16:
dtype = np.int32
elif dtype == np.int32:
dtype = np.int64
qc_variable = qc_variable.astype(dtype)
if index is not None:
if flag_value:
qc_variable[index] = test_number
else:
qc_variable[index] = set_bit(qc_variable[index], test_number)
self._obj[qc_var_name].values = qc_variable
def unset_test(self, var_name=None, qc_var_name=None, index=None, test_number=None,
flag_value=False, flag_values_reset_value=0):
"""
Method to unset a test/filter from a quality control variable.
Parameters
----------
var_name : str or None
Data variable name.
qc_var_name : str or None
Quality control variable name. Ignored if var_name is set.
index : int or list or numpy array
Index to unset test in quality control array. If want to
unset all values will need to pass in index of all values.
test_number : int
Test number to remove.
flag_value : boolean
Switch to use flag_values integer quality control.
flag_values_reset_value : int
Value to use when resetting a flag_values value to not be set.
Examples
--------
.. code-block:: python
ds_object.qcfilter.unset_test(
var_name, index=0, test_number=2)
"""
if index is None:
return
if var_name is None and qc_var_name is None:
raise ValueError('You need to provide a value for var_name or qc_var_name '
'keyword when calling the unset_test() method')
if var_name is not None:
qc_var_name = self._obj.qcfilter.check_for_ancillary_qc(var_name)
qc_variable = self._obj[qc_var_name].values
if flag_value:
qc_variable[index] = flag_values_reset_value
else:
qc_variable[index] = unset_bit(qc_variable[index], test_number)
self._obj[qc_var_name].values = qc_variable
def available_bit(self, qc_var_name, recycle=False):
"""
Method to determine next available bit or flag to use with a QC test.
Will check for flag_masks first and if not found will check for
flag_values. This will drive how the next value is chosen.
Parameters
----------
qc_var_name : str
Quality control variable name.
recycle : boolean
Option to look for a bit (test) not in use starting from 1.
If a test is not defined will return the lowest number, else
will just use next highest number.
Returns
-------
test_num : int
Next available test number.
"""
try:
flag_masks = self._obj[qc_var_name].attrs['flag_masks']
flag_value = False
except KeyError:
try:
flag_masks = self._obj[qc_var_name].attrs['flag_values']
flag_value = True
except KeyError:
try:
self._obj[qc_var_name].attrs['flag_values']
flag_masks = self._obj[qc_var_name].attrs['flag_masks']
flag_value = False
except KeyError:
raise ValueError('Problem getting next value from '
'available_bit(). flag_values and '
'flag_masks not set as expected')
if flag_masks == []:
next_bit = 1
else:
if flag_value:
if recycle:
next_bit = min(set(range(1, 100000)) - set(flag_masks))
else:
next_bit = max(flag_masks) + 1
else:
if recycle:
tests = [parse_bit(mask)[0] for mask in flag_masks]
next_bit = min(set(range(1, 63)) - set(tests))
else:
next_bit = parse_bit(max(flag_masks))[0] + 1
return int(next_bit)
def get_qc_test_mask(self, var_name=None, test_number=None, qc_var_name=None,
flag_value=False, return_index=False):
"""
Returns a numpy array of False or True where a particular
flag or bit is set in a numpy array. Must set var_name or qc_var_name
when calling.
Parameters
----------
var_name : str or None
Data variable name.
test_number : int
Test number to return array where test is set.
qc_var_name : str or None
Quality control variable name. Ignored if var_name is set.
flag_value : boolean
Switch to use flag_values integer quality control.
return_index : boolean
Return a numpy array of index numbers into QC array where the
test is set instead of 0 or 1 mask.
Returns
-------
test_mask : bool array
A numpy boolean array with False or True where the test number or
bit was set.
Examples
--------
.. code-block:: python
from act.io.armfiles import read_netcdf
from act.tests import EXAMPLE_IRT25m20s
ds_object = read_netcdf(EXAMPLE_IRT25m20s)
var_name = 'inst_up_long_dome_resist'
result = ds_object.qcfilter.add_test(
var_name, index=[0, 1, 2], test_meaning='Birds!')
qc_var_name = result['qc_variable_name']
mask = ds_object.qcfilter.get_qc_test_mask(
var_name, result['test_number'], return_index=True)
print(mask)
array([0, 1, 2])
mask = ds_object.qcfilter.get_qc_test_mask(
var_name, result['test_number'])
print(mask)
array([ True, True, True, ..., False, False, False])
data = ds_object[var_name].values
print(data[mask])
array([7.84 , 7.8777, 7.8965], dtype=float32)
import numpy as np
data[mask] = np.nan
print(data)
array([ nan, nan, nan, ..., 7.6705, 7.6892, 7.6892],
dtype=float32)
"""
if var_name is None and qc_var_name is None:
raise ValueError('You need to provide a value for var_name or qc_var_name '
'keyword when calling the get_qc_test_mask() method')
if test_number is None:
raise ValueError('You need to provide a value for test_number '
'keyword when calling the get_qc_test_mask() method')
if var_name is not None:
qc_var_name = self._obj.qcfilter.check_for_ancillary_qc(var_name)
qc_variable = self._obj[qc_var_name].values
# Ensure the qc_variable data type is integer. This ensures bitwise comparison
# will not cause an error.
if qc_variable.dtype.kind not in np.typecodes["AllInteger"]:
qc_variable = qc_variable.astype(int)
if flag_value:
tripped = qc_variable == test_number
else:
check_bit = set_bit(0, test_number) & qc_variable
tripped = check_bit > 0
test_mask = np.full(qc_variable.shape, False, dtype='bool')
# Make sure test_mask is an array. If qc_variable is scalar will
# be returned from np.zeros as scalar.
test_mask = np.atleast_1d(test_mask)
test_mask[tripped] = True
test_mask = np.ma.make_mask(test_mask, shrink=False)
if return_index:
test_mask = np.where(test_mask)[0]
return test_mask
def get_masked_data(self, var_name, rm_assessments=None,
rm_tests=None, return_nan_array=False,
ma_fill_value=None, return_inverse=False):
"""
Returns a numpy masked array containing data and mask or
a numpy float array with masked values set to NaN.
Parameters
----------
var_name : str
Data variable name.
rm_assessments : str or list of str
Assessment name to exclude from returned data.
rm_tests : int or list of int
Test numbers to exclude from returned data. This is the test
number (or bit position number) not the mask number.
return_nan_array : boolean
Return a numpy array with filtered (or masked) values
set to numpy NaN value. If the data is type int will upconvert
to numpy float to allow setting NaN value.
ma_fill_value : int or float (or str?)
The numpy masked array fill_value used in creation of the
masked array. If the datatype needs to be upconverted to allow
the fill value to be used, data will be upconverted.
return_inverse : boolean
Invert the masked array mask, returning a data array where the mask is
set to False instead of True. Useful for overplotting the
data that is failing the tests.
Returns
-------
variable : numpy masked array or numpy float array
Default is to return a numpy masked array with the mask set to
True where the test with requested assessment or test number
was found set.
If return_nan_array is True will return numpy array upconverted
to float with locations where the test with requested assessment
or test number was found set converted to NaN.
Examples
--------
.. code-block:: python
from act.io.armfiles import read_netcdf
from act.tests import EXAMPLE_IRT25m20s
ds_object = read_netcdf(EXAMPLE_IRT25m20s)
var_name = 'inst_up_long_dome_resist'
result = ds_object.qcfilter.add_test(
var_name, index=[0, 1, 2], test_meaning='Birds!')
data = ds_object.qcfilter.get_masked_data(var_name,
rm_assessments=['Bad', 'Indeterminate'])
print(data)
masked_array(data=[--, --, --, ..., 7.670499801635742,
7.689199924468994, 7.689199924468994],
mask=[ True, True, True, ..., False, False, False],
fill_value=1e+20, dtype=float32)
"""
qc_var_name = self._obj.qcfilter.check_for_ancillary_qc(
var_name, add_if_missing=False)
flag_value = False
flag_values = None
flag_masks = None
flag_assessments = None
try:
flag_assessments = self._obj[qc_var_name].attrs['flag_assessments']
flag_masks = self._obj[qc_var_name].attrs['flag_masks']
except KeyError:
pass
try:
flag_values = self._obj[qc_var_name].attrs['flag_values']
flag_value = True
except KeyError:
pass
test_numbers = []
if rm_tests is not None:
if isinstance(rm_tests, (int, float, str)):
rm_tests = [int(rm_tests)]
test_numbers.extend(rm_tests)
if rm_assessments is not None:
if isinstance(rm_assessments, str):
rm_assessments = [rm_assessments]
if flag_masks is not None:
test_nums = [parse_bit(mask)[0] for mask in flag_masks]
if flag_values is not None:
test_nums = flag_values
rm_assessments = [x.lower() for x in rm_assessments]
if flag_assessments is not None:
for ii, assessment in enumerate(flag_assessments):
if assessment.lower() in rm_assessments:
test_numbers.append(test_nums[ii])
# Make the list of test numbers to mask unique
test_numbers = list(set(test_numbers))
# Create mask of indexes by looking where each test is set
variable = self._obj[var_name].values
nan_dtype = np.float32
if variable.dtype in (np.float64, np.int64):
nan_dtype = np.float64
mask = np.zeros(variable.shape, dtype=bool)
for test in test_numbers:
mask = mask | self._obj.qcfilter.get_qc_test_mask(
var_name, test, flag_value=flag_value)
# Convert data numpy array into masked array
try:
variable = np.ma.array(variable, mask=mask,
fill_value=ma_fill_value)
except TypeError:
variable = np.ma.array(variable, mask=mask,
fill_value=ma_fill_value,
dtype=np.array(ma_fill_value).dtype)
# If requested switch array from where data is not failing tests
# to where data is failing tests. This can be used when over plotting
# where the data is failing the tests.
if return_inverse:
mask = variable.mask
mask = np.invert(mask)
variable.mask = mask
# If asked to return numpy array with values set to NaN
if return_nan_array:
variable = variable.astype(nan_dtype)
variable = variable.filled(fill_value=np.nan)
return variable
def datafilter(self, variables=None, rm_assessments=None, rm_tests=None,
np_ma=True, verbose=False, del_qc_var=True):
"""
Method to apply quality control variables to data variables by
changing the data values in the dataset using quality control variables.
The data variable is changed to a numpy masked array with failing
data masked or, if requested, to numpy array with failing data set to
NaN. This can be used to update the data variable in the xarray
dataset for use with xarray methods to perform analysis on the data
since those methods don't read the quality control variables.
Parameters
----------
variables : None or str or list of str
Data variable names to process
rm_assessments : str or list of str
Assessment names listed under quality control variable flag_assessments
to exclude from returned data. Examples include
['Bad', 'Incorrect', 'Indeterminate', 'Suspect']
rm_tests : int or list of int
Test numbers listed under quality control variable to exclude from
returned data. This is the test
number (or bit position number) not the mask number.
np_ma : boolean
Should the data in the xarray DataArray be set to numpy masked
arrays. This should work with most xarray methods. If the xarray
processing method does not work with numpy masked array set to
False to use NaN.
verbose : boolean
Print processing information.
del_qc_var : boolean
Option to delete quality control variable after processing. Since
the data values can not be determined after they are set to NaN
and xarray method processing would also process the quality control
variables, the default is to remove the quality control data
variables. If numpy masked arrays are used the data are not lost
but would need to be extracted and set to DataArray to return the
dataset back to original state.
Examples
--------
.. code-block:: python
from act.io.armfiles import read_netcdf
from act.tests import EXAMPLE_MET1
ds = read_netcdf(EXAMPLE_MET1)
ds.clean.cleanup()
var_name = 'atmos_pressure'
ds_1 = ds.mean()
ds.qcfilter.add_less_test(var_name, 99, test_assessment='Bad')
ds.qcfilter.datafilter(rm_assessments='Bad')
ds_2 = ds.mean()
print(f'All data: {ds_1[var_name].values}, Bad Removed: {ds_2[var_name].values}')
All data: 98.86097717285156, Bad Removed: 99.15148162841797
"""
if variables is not None and isinstance(variables, str):
variables = [variables]
if variables is None:
variables = list(self._obj.data_vars)
for var_name in variables:
qc_var_name = self.check_for_ancillary_qc(var_name,
add_if_missing=False,
cleanup=False)
if qc_var_name is None:
if verbose:
print(f'No quality control variable for {var_name} found '
f'in call to .qcfilter.datafilter()')
continue
data = self.get_masked_data(var_name, rm_assessments=rm_assessments,
rm_tests=rm_tests, ma_fill_value=np_ma)
self._obj[var_name].values = data
if del_qc_var:
del self._obj[qc_var_name]
if verbose:
print(f'Deleting {qc_var_name} from dataset')
def set_bit(array, bit_number):
"""
Function to set a quality control bit given a scalar or
array of values and a bit number.
Parameters
----------
array : int or numpy array
The bitpacked array to set the bit number.
bit_number : int
The bit (or test) number to set.
Returns
-------
array : int, numpy array, tuple, list
Integer or numpy array with bit set for each element of the array.
Returned in same type.
Examples
--------
Example use setting bit 2 to an array called data:
.. code-block:: python
data = np.array(range(0, 7))
data = set_bit(data, 2)
print(data)
array([2, 3, 2, 3, 6, 7, 6])
"""
was_list = False
was_tuple = False
if isinstance(array, list):
array = np.array(array)
was_list = True
if isinstance(array, tuple):
array = np.array(array)
was_tuple = True
if bit_number > 0:
array |= (1 << bit_number - 1)
if was_list:
array = list(array)
if was_tuple:
array = tuple(array)
return array
def unset_bit(array, bit_number):
"""
Function to remove a quality control bit given a
scalar or array of values and a bit number.
Parameters
----------
array : int or numpy array
Array of integers containing bit packed numbers.
bit_number : int
Bit number to remove.
Returns
-------
array : int or numpy array
Returns same data type as array entered with bit removed. Will
fail gracefully if the bit requested to be removed was not set.
Examples
--------
Example use removing bit 2 from an array called data:
> data = set_bit(0,2)
> data = set_bit(data,3)
> data
6
> data = unset_bit(data,2)
> data
4
"""
was_list = False
was_tuple = False
if isinstance(array, list):
array = np.array(array)
was_list = True
if isinstance(array, tuple):
array = np.array(array)
was_tuple = True
if bit_number > 0:
array = array & ~ (1 << bit_number - 1)
if was_list:
array = list(array)
if was_tuple:
array = tuple(array)
return array
def parse_bit(qc_bit):
"""
Given a single integer value, return bit positions.
Parameters
----------
qc_bit : int or numpy int
Bit packed integer number to be parsed.
Returns
-------
bit_number : numpy.int32 array
Array containing all bit numbers of the bit packed number.
If no bits set returns empty array.
Examples
--------
> parse_bit(7)
array([1, 2, 3])
"""
if isinstance(qc_bit, (list, tuple, np.ndarray)):
if len(qc_bit) > 1:
raise ValueError("Must be a single value.")
qc_bit = qc_bit[0]
if qc_bit < 0:
raise ValueError("Must be a positive integer.")
bit_number = []
qc_bit = int(qc_bit)
counter = 0
while qc_bit > 0:
temp_value = qc_bit % 2
qc_bit = qc_bit >> 1
counter += 1
if temp_value == 1:
bit_number.append(counter)
bit_number = np.asarray(bit_number, dtype=np.int32)
return bit_number
<|code_end|>
|
Error when clean routine tries to remove unused bits
When running the script below on the ARM ADC systems, an error occurs when trying to add a DQR to the QC. The error stems from the clean routine trying to remove unused bits from those QC variables:
```
File "/home/kehoe/dev/dq/lib/python/ACT/act/qc/qcfilter.py", line 1065, in unset_bit
array = array & ~(1 << bit_number - 1)
TypeError: ufunc 'bitwise_and' not supported for the input types, and the inputs could not be safely coerced to any supported types according to the casting rule ''safe''
```
```
import act
import glob
import numpy as np
ds = 'sgpmetE13.b1'
site = ds[0:3]
files = glob.glob('/data/archive/'+site+'/'+ds+'/'+ds+'.2010*cdf')
years = [f.split('.')[-3][0:4] for f in files]
years = np.unique(years)
for y in years:
files = glob.glob('/data/archive/'+site+'/'+ds+'/'+ds+'.'+y+'*cdf')
files.sort()
obj = act.io.armfiles.read_netcdf(files)
obj = act.qc.arm.add_dqr_to_qc(obj, variable='temp_mean')
obj = obj.where(obj['qc_temp_mean'] == 0)
obj = obj.resample(time='M').mean()
for i in range(len(obj['time'].values)):
print(','.join([str(obj['time'].values[i]), str(obj['temp_mean'].values[i])]))
obj.close()
```
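A minimal sketch of the underlying type problem, independent of the ARM data (the array below is illustrative; the assumption is that the QC values end up in a float array, for example after concatenating files where a QC variable is missing and gets filled with NaN):
```
import numpy as np

# numpy only defines bitwise operators for integer (and boolean) dtypes.
qc_float = np.array([0.0, 2.0, 6.0])  # hypothetical QC values stored as float
try:
    qc_float & ~(1 << 1)  # same expression used in unset_bit()
except TypeError as error:
    print(error)  # ufunc 'bitwise_and' not supported for the input types ...

# Casting to an integer dtype before the bitwise operation avoids the error.
qc_int = qc_float.astype(np.int32)
print(qc_int & ~(1 << 1))  # array([0, 0, 4], dtype=int32)
```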
|
act/qc/qcfilter.py
<|code_start|>"""
Functions and methods for creating ancillary quality control variables
and filters (masks) which can be used with various corrections
routines in ACT.
"""
import dask
import numpy as np
import xarray as xr
from act.qc import comparison_tests, qctests
@xr.register_dataset_accessor('qcfilter')
class QCFilter(qctests.QCTests, comparison_tests.QCTests):
"""
A class for building quality control variables containing arrays for
filtering data based on a set of test condition typically based on the
values in the data fields. These filters can be used in various
algorithms and calculations within ACT.
"""
def __init__(self, xarray_obj):
"""initialize"""
self._obj = xarray_obj
def check_for_ancillary_qc(self, var_name, add_if_missing=True, cleanup=False, flag_type=False):
"""
Method to check if a quality control variable exist in the dataset
and return the quality control variable name.
Will call create_qc_variable() to make variable if does not exist
and update_ancillary_variable() to ensure linkage between data and
quality control variable. Can also be used just to get the
corresponding quality control variable name, optionally adding
it if it is missing.
Parameters
----------
var_name : str
Data variable name.
add_if_missing : boolean
Add quality control variable if missing from object.
cleanup : boolean
Option to run qc.clean.cleanup() method on the object
to ensure the object was updated from ARM QC to the
correct standardized QC.
flag_type : boolean
Indicating the QC variable uses flag_values instead of
flag_masks.
Returns
-------
qc_var_name : str or None
Name of existing or new quality control variable. Returns
None if no existing quality control variable is found and
add_if_missing is set to False.
"""
qc_var_name = None
try:
ancillary_variables = self._obj[var_name].attrs['ancillary_variables']
if isinstance(ancillary_variables, str):
ancillary_variables = ancillary_variables.split()
for var in ancillary_variables:
for attr, value in self._obj[var].attrs.items():
if attr == 'standard_name' and 'quality_flag' in value:
qc_var_name = var
if add_if_missing and qc_var_name is None:
qc_var_name = self._obj.qcfilter.create_qc_variable(var_name, flag_type=flag_type)
except KeyError:
# Since no ancillary_variables exist look for ARM style of QC
# variable name. If it exists use it else create new
# QC variable.
if add_if_missing:
try:
self._obj['qc_' + var_name]
qc_var_name = 'qc_' + var_name
except KeyError:
qc_var_name = self._obj.qcfilter.create_qc_variable(
var_name, flag_type=flag_type
)
# Make sure data variable has a variable attribute linking
# data variable to QC variable.
if add_if_missing:
self._obj.qcfilter.update_ancillary_variable(var_name, qc_var_name)
# Clean up quality control variables to the required standard in the
# xarray object.
if cleanup:
self._obj.clean.cleanup(handle_missing_value=True, link_qc_variables=False)
return qc_var_name
def create_qc_variable(
self, var_name, flag_type=False, flag_values_set_value=0, qc_var_name=None
):
"""
Method to create a quality control variable in the dataset.
Will try not to destroy the qc variable by appending numbers
to the variable name if needed.
Parameters
----------
var_name : str
Data variable name.
flag_type : boolean
If an integer flag type should be created instead of
bitpacked mask type. Will create flag_values instead of
flag_masks.
flag_values_set_value : int
Initial flag value to use when initializing array.
qc_var_name : str
Optional new quality control variable name. If not set
will create one using \\"qc\\_\\" prepended to the data
variable name. If the name given or created is taken
will append a number that does not have a conflict.
Returns
-------
qc_var_name : str
Name of new quality control variable created.
"""
# Make QC variable long name. The variable long_name attribute
# may not exist so catch that error and set to default.
try:
qc_variable_long_name = (
'Quality check results on field: ' + self._obj[var_name].attrs['long_name']
)
except KeyError:
qc_variable_long_name = 'Quality check results for ' + var_name
# Make a new quality control variable name. Check if exists in the
# dataset. If so loop through creation of new name until one is
# found that will not replace existing variable.
if qc_var_name is None:
qc_var_name = 'qc_' + var_name
variable_names = list(self._obj.data_vars)
if qc_var_name in variable_names:
for ii in range(1, 100):
temp_qc_var_name = '_'.join([qc_var_name, str(ii)])
if temp_qc_var_name not in variable_names:
qc_var_name = temp_qc_var_name
break
# Create the QC variable filled with 0 values matching the
# shape of data variable.
try:
qc_data = dask.array.from_array(
np.zeros_like(self._obj[var_name].values, dtype=np.int32),
chunks=self._obj[var_name].data.chunksize,
)
except AttributeError:
qc_data = np.zeros_like(self._obj[var_name].values, dtype=np.int32)
# Updating to use coords instead of dim, which caused a loss of
# attributes as noted in Issue 347
self._obj[qc_var_name] = xr.DataArray(
data=qc_data,
coords=self._obj[var_name].coords,
attrs={'long_name': qc_variable_long_name, 'units': '1'},
)
# Update if using flag_values and don't want 0 to be default value.
if flag_type and flag_values_set_value != 0:
self._obj[qc_var_name].values = self._obj[qc_var_name].values + int(
flag_values_set_value
)
# Add required variable attributes.
if flag_type:
self._obj[qc_var_name].attrs['flag_values'] = []
else:
self._obj[qc_var_name].attrs['flag_masks'] = []
self._obj[qc_var_name].attrs['flag_meanings'] = []
self._obj[qc_var_name].attrs['flag_assessments'] = []
self._obj[qc_var_name].attrs['standard_name'] = 'quality_flag'
self.update_ancillary_variable(var_name, qc_var_name=qc_var_name)
return qc_var_name
def update_ancillary_variable(self, var_name, qc_var_name=None):
"""
Method to check if ancillary_variables variable attribute
is set with quality control variable name.
Parameters
----------
var_name : str
Data variable name.
qc_var_name : str
quality control variable name. If not given will attempt
to get the name from data variable ancillary_variables
attribute.
"""
if qc_var_name is None:
qc_var_name = self._obj.qcfilter.check_for_ancillary_qc(var_name, add_if_missing=False)
if qc_var_name is None:
return
try:
ancillary_variables = self._obj[var_name].attrs['ancillary_variables']
if qc_var_name not in ancillary_variables:
ancillary_variables = ' '.join([ancillary_variables, qc_var_name])
except KeyError:
ancillary_variables = qc_var_name
self._obj[var_name].attrs['ancillary_variables'] = ancillary_variables
def add_test(
self,
var_name,
index=None,
test_number=None,
test_meaning=None,
test_assessment='Bad',
flag_value=False,
recycle=False,
):
"""
Method to add a new test/filter to a quality control variable.
Parameters
----------
var_name : str
data variable name
index : int, bool, list of int or bool, numpy array, tuple of numpy arrays
Indexes into quality control array to set the test bit.
If not set or set to None will not set the test on any
element of the quality control variable but will still
add the test to the flag_masks, flag_meanings and
flag_assessments attributes.
test_number : int
Test number to use. If keyword is not set will use first
available test bit/test number.
recycle : boolean
Option to use number less than next highest test if available. For example
tests 1, 2, 4, 5 are set. Set to true the next test chosen will be 3, else
will be 6.
test_meaning : str
String describing the test. Will be added to flag_meanings
variable attribute.
test_assessment : str
String describing the test assessment. If not set will use
"Bad" as the string to append to flag_assessments. Will
update to be lower case and then capitalized.
flag_value : boolean
Switch to use flag_values integer quality control.
Returns
-------
test_dict : dict
A dictionary containing information added to the QC
variable.
Examples
--------
> result = ds_object.qcfilter.add_test(
var_name, test_meaning='Birds!')
"""
test_dict = {}
if test_meaning is None:
raise ValueError(
'You need to provide a value for test_meaning '
'keyword when calling the add_test method'
)
# This ensures the indexing will work even if given float values.
# Preserves tuples from np.where() or boolean arrays for standard
# python indexing.
if index is not None and not isinstance(index, (np.ndarray, tuple)):
index = np.array(index)
if index.dtype.kind not in np.typecodes['AllInteger']:
index = index.astype(int)
# Ensure assessment is lowercase and capitalized to be consistent
test_assessment = test_assessment.lower().capitalize()
qc_var_name = self._obj.qcfilter.check_for_ancillary_qc(var_name, flag_type=flag_value)
if test_number is None:
test_number = self._obj.qcfilter.available_bit(qc_var_name, recycle=recycle)
self._obj.qcfilter.set_test(var_name, index, test_number, flag_value)
if flag_value:
try:
self._obj[qc_var_name].attrs['flag_values'].append(test_number)
except KeyError:
self._obj[qc_var_name].attrs['flag_values'] = [test_number]
else:
# Determine if flag_masks test number is too large for current data type.
# If so up convert data type.
flag_masks = np.array(self._obj[qc_var_name].attrs['flag_masks'])
mask_dtype = flag_masks.dtype
if not np.issubdtype(mask_dtype, np.integer):
mask_dtype = np.uint32
if np.iinfo(mask_dtype).max - set_bit(0, test_number) <= -1:
if mask_dtype == np.int8 or mask_dtype == np.uint8:
mask_dtype = np.uint16
elif mask_dtype == np.int16 or mask_dtype == np.uint16:
mask_dtype = np.uint32
elif mask_dtype == np.int32 or mask_dtype == np.uint32:
mask_dtype = np.uint64
flag_masks = flag_masks.astype(mask_dtype)
flag_masks = np.append(flag_masks, np.array(set_bit(0, test_number), dtype=mask_dtype))
self._obj[qc_var_name].attrs['flag_masks'] = list(flag_masks)
try:
self._obj[qc_var_name].attrs['flag_meanings'].append(test_meaning)
except KeyError:
self._obj[qc_var_name].attrs['flag_meanings'] = [test_meaning]
try:
self._obj[qc_var_name].attrs['flag_assessments'].append(test_assessment)
except KeyError:
self._obj[qc_var_name].attrs['flag_assessments'] = [test_assessment]
test_dict['test_number'] = test_number
test_dict['test_meaning'] = test_meaning
test_dict['test_assessment'] = test_assessment
test_dict['qc_variable_name'] = qc_var_name
test_dict['variable_name'] = var_name
return test_dict
def remove_test(
self,
var_name=None,
test_number=None,
qc_var_name=None,
flag_value=False,
flag_values_reset_value=0,
):
"""
Method to remove a test/filter from a quality control variable. Must set
var_name or qc_var_name.
Parameters
----------
var_name : str or None
Data variable name.
test_number : int
Test number to remove.
qc_var_name : str or None
Quality control variable name. Ignored if var_name is set.
flag_value : boolean
Switch to use flag_values integer quality control.
flag_values_reset_value : int
Value to use when resetting a flag_values value to not be set.
Examples
--------
> ds_object.qcfilter.remove_test(
var_name, test_number=3)
"""
if test_number is None:
raise ValueError(
'You need to provide a value for test_number '
'keyword when calling the remove_test() method'
)
if var_name is None and qc_var_name is None:
raise ValueError(
'You need to provide a value for var_name or qc_var_name '
'keyword when calling the remove_test() method'
)
if var_name is not None:
qc_var_name = self._obj.qcfilter.check_for_ancillary_qc(var_name)
# Determine which index is using the test number
index = None
if flag_value:
flag_values = self._obj[qc_var_name].attrs['flag_values']
for ii, flag_num in enumerate(flag_values):
if flag_num == test_number:
index = ii
break
else:
flag_masks = self._obj[qc_var_name].attrs['flag_masks']
for ii, bit_num in enumerate(flag_masks):
if parse_bit(bit_num)[0] == test_number:
index = ii
break
# If can't find the index of test return before doing anything.
if index is None:
return
if flag_value:
remove_index = self._obj.qcfilter.get_qc_test_mask(
var_name=var_name,
qc_var_name=qc_var_name,
test_number=test_number,
return_index=True,
flag_value=True,
)
self._obj.qcfilter.unset_test(
var_name=var_name,
qc_var_name=qc_var_name,
index=remove_index,
test_number=test_number,
flag_value=flag_value,
flag_values_reset_value=flag_values_reset_value,
)
del flag_values[index]
self._obj[qc_var_name].attrs['flag_values'] = flag_values
else:
remove_index = self._obj.qcfilter.get_qc_test_mask(
var_name=var_name,
qc_var_name=qc_var_name,
test_number=test_number,
return_index=True,
)
self._obj.qcfilter.unset_test(
var_name=var_name,
qc_var_name=qc_var_name,
index=remove_index,
test_number=test_number,
flag_value=flag_value,
)
del flag_masks[index]
self._obj[qc_var_name].attrs['flag_masks'] = flag_masks
flag_meanings = self._obj[qc_var_name].attrs['flag_meanings']
del flag_meanings[index]
self._obj[qc_var_name].attrs['flag_meanings'] = flag_meanings
flag_assessments = self._obj[qc_var_name].attrs['flag_assessments']
del flag_assessments[index]
self._obj[qc_var_name].attrs['flag_assessments'] = flag_assessments
def set_test(self, var_name, index=None, test_number=None, flag_value=False):
"""
Method to set a test/filter in a quality control variable.
Parameters
----------
var_name : str
Data variable name.
index : int or list or numpy array
Index to set test in quality control array. If want to
unset all values will need to pass in index of all values.
test_number : int
Test number to set.
flag_value : boolean
Switch to use flag_values integer quality control.
Examples
--------
.. code-block:: python
index = [0, 1, 2, 30]
ds_object.qcfilter.set_test(var_name, index=index, test_number=2)
"""
qc_var_name = self._obj.qcfilter.check_for_ancillary_qc(var_name)
qc_variable = np.array(self._obj[qc_var_name].values)
# Determine if test number is too large for current data type. If so
# up convert data type.
dtype = qc_variable.dtype
if np.iinfo(dtype).max - set_bit(0, test_number) < -1:
if dtype == np.int8:
dtype = np.int16
elif dtype == np.int16:
dtype = np.int32
elif dtype == np.int32:
dtype = np.int64
qc_variable = qc_variable.astype(dtype)
if index is not None:
if flag_value:
qc_variable[index] = test_number
else:
qc_variable[index] = set_bit(qc_variable[index], test_number)
self._obj[qc_var_name].values = qc_variable
def unset_test(
self,
var_name=None,
qc_var_name=None,
index=None,
test_number=None,
flag_value=False,
flag_values_reset_value=0,
):
"""
Method to unset a test/filter from a quality control variable.
Parameters
----------
var_name : str or None
Data variable name.
qc_var_name : str or None
Quality control variable name. Ignored if var_name is set.
index : int or list or numpy array
Index to unset test in quality control array. If want to
unset all values will need to pass in index of all values.
test_number : int
Test number to remove.
flag_value : boolean
Switch to use flag_values integer quality control.
flag_values_reset_value : int
Value to use when resetting a flag_values value to not be set.
Examples
--------
.. code-block:: python
ds_object.qcfilter.unset_test(var_name, index=0, test_number=2)
"""
if index is None:
return
if var_name is None and qc_var_name is None:
raise ValueError(
'You need to provide a value for var_name or qc_var_name '
'keyword when calling the unset_test() method'
)
if var_name is not None:
qc_var_name = self._obj.qcfilter.check_for_ancillary_qc(var_name)
qc_variable = self._obj[qc_var_name].values
if flag_value:
qc_variable[index] = flag_values_reset_value
else:
qc_variable[index] = unset_bit(qc_variable[index], test_number)
self._obj[qc_var_name].values = qc_variable
def available_bit(self, qc_var_name, recycle=False):
"""
Method to determine next available bit or flag to use with a QC test.
Will check for flag_masks first and if not found will check for
flag_values. This will drive how the next value is chosen.
Parameters
----------
qc_var_name : str
Quality control variable name.
recycle : boolean
Option to look for a bit (test) not in use starting from 1.
If a test is not defined will return the lowest number, else
will just use next highest number.
Returns
-------
test_num : int
Next available test number.
"""
try:
flag_masks = self._obj[qc_var_name].attrs['flag_masks']
flag_value = False
except KeyError:
try:
flag_masks = self._obj[qc_var_name].attrs['flag_values']
flag_value = True
except KeyError:
try:
self._obj[qc_var_name].attrs['flag_values']
flag_masks = self._obj[qc_var_name].attrs['flag_masks']
flag_value = False
except KeyError:
raise ValueError(
'Problem getting next value from '
'available_bit(). flag_values and '
'flag_masks not set as expected'
)
if flag_masks == []:
next_bit = 1
else:
if flag_value:
if recycle:
next_bit = min(set(range(1, 100000)) - set(flag_masks))
else:
next_bit = max(flag_masks) + 1
else:
if recycle:
tests = [parse_bit(mask)[0] for mask in flag_masks]
next_bit = min(set(range(1, 63)) - set(tests))
else:
next_bit = parse_bit(max(flag_masks))[0] + 1
return int(next_bit)
def get_qc_test_mask(
self,
var_name=None,
test_number=None,
qc_var_name=None,
flag_value=False,
return_index=False,
):
"""
Returns a numpy array of False or True where a particular
flag or bit is set in a numpy array. Must set var_name or qc_var_name
when calling.
Parameters
----------
var_name : str or None
Data variable name.
test_number : int
Test number to return array where test is set.
qc_var_name : str or None
Quality control variable name. Ignored if var_name is set.
flag_value : boolean
Switch to use flag_values integer quality control.
return_index : boolean
Return a numpy array of index numbers into QC array where the
test is set instead of 0 or 1 mask.
Returns
-------
test_mask : bool array
A numpy boolean array with False or True where the test number or
bit was set.
Examples
--------
.. code-block:: python
from act.io.armfiles import read_netcdf
from act.tests import EXAMPLE_IRT25m20s
ds_object = read_netcdf(EXAMPLE_IRT25m20s)
var_name = "inst_up_long_dome_resist"
result = ds_object.qcfilter.add_test(
var_name, index=[0, 1, 2], test_meaning="Birds!"
)
qc_var_name = result["qc_variable_name"]
mask = ds_object.qcfilter.get_qc_test_mask(
var_name, result["test_number"], return_index=True
)
print(mask)
array([0, 1, 2])
mask = ds_object.qcfilter.get_qc_test_mask(var_name, result["test_number"])
print(mask)
array([True, True, True, ..., False, False, False])
data = ds_object[var_name].values
print(data[mask])
array([7.84, 7.8777, 7.8965], dtype=float32)
import numpy as np
data[mask] = np.nan
print(data)
array([nan, nan, nan, ..., 7.6705, 7.6892, 7.6892], dtype=float32)
"""
if var_name is None and qc_var_name is None:
raise ValueError(
'You need to provide a value for var_name or qc_var_name '
'keyword when calling the get_qc_test_mask() method'
)
if test_number is None:
raise ValueError(
'You need to provide a value for test_number '
'keyword when calling the get_qc_test_mask() method'
)
if var_name is not None:
qc_var_name = self._obj.qcfilter.check_for_ancillary_qc(var_name)
qc_variable = self._obj[qc_var_name].values
# Ensure the qc_variable data type is integer. This ensures bitwise comparison
# will not cause an error.
if qc_variable.dtype.kind not in np.typecodes['AllInteger']:
qc_variable = qc_variable.astype(int)
if flag_value:
tripped = qc_variable == test_number
else:
check_bit = set_bit(0, test_number) & qc_variable
tripped = check_bit > 0
test_mask = np.full(qc_variable.shape, False, dtype='bool')
# Make sure test_mask is an array. If qc_variable is scalar will
# be returned from np.zeros as scalar.
test_mask = np.atleast_1d(test_mask)
test_mask[tripped] = True
test_mask = np.ma.make_mask(test_mask, shrink=False)
if return_index:
test_mask = np.where(test_mask)[0]
return test_mask
def get_masked_data(
self,
var_name,
rm_assessments=None,
rm_tests=None,
return_nan_array=False,
ma_fill_value=None,
return_inverse=False,
):
"""
Returns a numpy masked array containing data and mask or
a numpy float array with masked values set to NaN.
Parameters
----------
var_name : str
Data variable name.
rm_assessments : str or list of str
Assessment name to exclude from returned data.
rm_tests : int or list of int
Test numbers to exclude from returned data. This is the test
number (or bit position number) not the mask number.
return_nan_array : boolean
Return a numpy array with filtered (or masked) values
set to numpy NaN value. If the data is type int will upconvert
to numpy float to allow setting NaN value.
ma_fill_value : int or float (or str?)
The numpy masked array fill_value used in creation of the
masked array. If the datatype needs to be upconverted to allow
the fill value to be used, data will be upconverted.
return_inverse : boolean
Invert the masked array mask, returning a data array where the mask is
set to False instead of True. Useful for overplotting the
data that is failing the tests.
Returns
-------
variable : numpy masked array or numpy float array
Default is to return a numpy masked array with the mask set to
True where the test with requested assessment or test number
was found set.
If return_nan_array is True will return numpy array upconverted
to float with locations where the test with requested assessment
or test number was found set converted to NaN.
Examples
--------
.. code-block:: python
from act.io.armfiles import read_netcdf
from act.tests import EXAMPLE_IRT25m20s
ds_object = read_netcdf(EXAMPLE_IRT25m20s)
var_name = "inst_up_long_dome_resist"
result = ds_object.qcfilter.add_test(
var_name, index=[0, 1, 2], test_meaning="Birds!"
)
data = ds_object.qcfilter.get_masked_data(
var_name, rm_assessments=["Bad", "Indeterminate"]
)
print(data)
masked_array(
data=[..., 7.670499801635742, 7.689199924468994, 7.689199924468994],
mask=[..., False, False, False],
fill_value=1e20,
dtype=float32,
)
"""
qc_var_name = self._obj.qcfilter.check_for_ancillary_qc(var_name, add_if_missing=False)
flag_value = False
flag_values = None
flag_masks = None
flag_assessments = None
try:
flag_assessments = self._obj[qc_var_name].attrs['flag_assessments']
flag_masks = self._obj[qc_var_name].attrs['flag_masks']
except KeyError:
pass
try:
flag_values = self._obj[qc_var_name].attrs['flag_values']
flag_value = True
except KeyError:
pass
test_numbers = []
if rm_tests is not None:
if isinstance(rm_tests, (int, float, str)):
rm_tests = [int(rm_tests)]
test_numbers.extend(rm_tests)
if rm_assessments is not None:
if isinstance(rm_assessments, str):
rm_assessments = [rm_assessments]
if flag_masks is not None:
test_nums = [parse_bit(mask)[0] for mask in flag_masks]
if flag_values is not None:
test_nums = flag_values
rm_assessments = [x.lower() for x in rm_assessments]
if flag_assessments is not None:
for ii, assessment in enumerate(flag_assessments):
if assessment.lower() in rm_assessments:
test_numbers.append(test_nums[ii])
# Make the list of test numbers to mask unique
test_numbers = list(set(test_numbers))
# Create mask of indexes by looking where each test is set
variable = self._obj[var_name].values
nan_dtype = np.float32
if variable.dtype in (np.float64, np.int64):
nan_dtype = np.float64
mask = np.zeros(variable.shape, dtype=bool)
for test in test_numbers:
mask = mask | self._obj.qcfilter.get_qc_test_mask(var_name, test, flag_value=flag_value)
# Convert data numpy array into masked array
try:
variable = np.ma.array(variable, mask=mask, fill_value=ma_fill_value)
except TypeError:
variable = np.ma.array(
variable,
mask=mask,
fill_value=ma_fill_value,
dtype=np.array(ma_fill_value).dtype,
)
# If requested switch array from where data is not failing tests
# to where data is failing tests. This can be used when over plotting
# where the data is failing the tests.
if return_inverse:
mask = variable.mask
mask = np.invert(mask)
variable.mask = mask
# If asked to return numpy array with values set to NaN
if return_nan_array:
variable = variable.astype(nan_dtype)
variable = variable.filled(fill_value=np.nan)
return variable
def datafilter(
self,
variables=None,
rm_assessments=None,
rm_tests=None,
np_ma=True,
verbose=False,
del_qc_var=True,
):
"""
Method to apply quality control variables to data variables by
changing the data values in the dataset using quality control variables.
The data variable is changed to a numpy masked array with failing
data masked or, if requested, to numpy array with failing data set to
NaN. This can be used to update the data variable in the xarray
dataset for use with xarray methods to perform analysis on the data
since those methods don't read the quality control variables.
Parameters
----------
variables : None or str or list of str
Data variable names to process
rm_assessments : str or list of str
Assessment names listed under quality control variable flag_assessments
to exclude from returned data. Examples include
['Bad', 'Incorrect', 'Indeterminate', 'Suspect']
rm_tests : int or list of int
Test numbers listed under quality control variable to exclude from
returned data. This is the test
number (or bit position number) not the mask number.
np_ma : boolean
Should the data in the xarray DataArray be set to numpy masked
arrays. This should work with most xarray methods. If the xarray
processing method does not work with numpy masked array set to
False to use NaN.
verbose : boolean
Print processing information.
del_qc_var : boolean
Option to delete the quality control variable after processing. Since
the data values can not be determined after they are set to NaN
and xarray method processing would also process the quality control
variables, the default is to remove the quality control data
variables. If numpy masked arrays are used the data are not lost
but would need to be extracted and set to DataArray to return the
dataset back to original state.
Examples
--------
.. code-block:: python
from act.io.armfiles import read_netcdf
from act.tests import EXAMPLE_MET1
ds = read_netcdf(EXAMPLE_MET1)
ds.clean.cleanup()
var_name = "atmos_pressure"
ds_1 = ds.mean()
ds.qcfilter.add_less_test(var_name, 99, test_assessment="Bad")
ds.qcfilter.datafilter(rm_assessments="Bad")
ds_2 = ds.mean()
print("All_data=", ds_1[var_name].values)
All_data = 98.86098
print("Bad_Removed=", ds_2[var_name].values)
Bad_Removed = 99.15148
"""
if variables is not None and isinstance(variables, str):
variables = [variables]
if variables is None:
variables = list(self._obj.data_vars)
for var_name in variables:
qc_var_name = self.check_for_ancillary_qc(var_name, add_if_missing=False, cleanup=False)
if qc_var_name is None:
if verbose:
print(
f'No quality control variable for {var_name} found '
f'in call to .qcfilter.datafilter()'
)
continue
data = self.get_masked_data(
var_name,
rm_assessments=rm_assessments,
rm_tests=rm_tests,
ma_fill_value=np_ma,
)
self._obj[var_name].values = data
if del_qc_var:
del self._obj[qc_var_name]
if verbose:
print(f'Deleting {qc_var_name} from dataset')
def set_bit(array, bit_number):
"""
Function to set a quality control bit given a scalar or
array of values and a bit number.
Parameters
----------
array : int or numpy array
The bitpacked array to set the bit number.
bit_number : int
The bit (or test) number to set.
Returns
-------
array : int, numpy array, tuple, list
Integer or numpy array with bit set for each element of the array.
Returned in same type.
Examples
--------
Example use setting bit 2 to an array called data:
.. code-block:: python
data = np.array(range(0, 7))
data = set_bit(data, 2)
print(data)
array([2, 3, 2, 3, 6, 7, 6])
"""
was_list = False
was_tuple = False
if isinstance(array, list):
array = np.array(array)
was_list = True
if isinstance(array, tuple):
array = np.array(array)
was_tuple = True
if bit_number > 0:
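# Test numbers are 1-based, so test/bit N corresponds to the integer value 2**(N - 1).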
array |= 1 << bit_number - 1
if was_list:
array = list(array)
if was_tuple:
array = tuple(array)
return array
def unset_bit(array, bit_number):
"""
Function to remove a quality control bit given a
scalar or array of values and a bit number.
Parameters
----------
array : int or numpy array
Array of integers containing bit packed numbers.
bit_number : int
Bit number to remove.
Returns
-------
array : int or numpy array
Returns same data type as array entered with bit removed. Will
fail gracefully if the bit requested to be removed was not set.
Examples
--------
Example use removing bit 2 from an array called data:
> data = set_bit(0,2)
> data = set_bit(data,3)
> data
6
> data = unset_bit(data,2)
> data
4
"""
was_list = False
was_tuple = False
if isinstance(array, list):
array = np.array(array)
was_list = True
if isinstance(array, tuple):
array = np.array(array)
was_tuple = True
if bit_number > 0:
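# Clear bit N by AND-ing with the complement of 2**(N - 1).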
array = array & ~(1 << bit_number - 1)
if was_list:
array = list(array)
if was_tuple:
array = tuple(array)
return array
def parse_bit(qc_bit):
"""
Given a single integer value, return bit positions.
Parameters
----------
qc_bit : int or numpy int
Bit packed integer number to be parsed.
Returns
-------
bit_number : numpy.int32 array
Array containing all bit numbers of the bit packed number.
If no bits set returns empty array.
Examples
--------
> parse_bit(7)
array([1, 2, 3])
"""
if isinstance(qc_bit, (list, tuple, np.ndarray)):
if len(qc_bit) > 1:
raise ValueError('Must be a single value.')
qc_bit = qc_bit[0]
if qc_bit < 0:
raise ValueError('Must be a positive integer.')
bit_number = []
qc_bit = int(qc_bit)
counter = 0
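# Peel bits off from least significant to most significant; counter tracks the 1-based bit (test) number.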
while qc_bit > 0:
temp_value = qc_bit % 2
qc_bit = qc_bit >> 1
counter += 1
if temp_value == 1:
bit_number.append(counter)
bit_number = np.asarray(bit_number, dtype=np.int32)
return bit_number
<|code_end|>
|
act/qc/qcfilter.py
<|code_start|>"""
Functions and methods for creating ancillary quality control variables
and filters (masks) which can be used with various corrections
routines in ACT.
"""
import dask
import numpy as np
import xarray as xr
from act.qc import comparison_tests, qctests
@xr.register_dataset_accessor('qcfilter')
class QCFilter(qctests.QCTests, comparison_tests.QCTests):
"""
A class for building quality control variables containing arrays for
filtering data based on a set of test condition typically based on the
values in the data fields. These filters can be used in various
algorithms and calculations within ACT.
"""
def __init__(self, xarray_obj):
"""initialize"""
self._obj = xarray_obj
def check_for_ancillary_qc(self, var_name, add_if_missing=True, cleanup=False, flag_type=False):
"""
Method to check if a quality control variable exists in the dataset
and return the quality control variable name.
Will call create_qc_variable() to make the variable if it does not exist
and update_ancillary_variable() to ensure linkage between the data and
quality control variable. Can also be used just to get the
corresponding quality control variable name, with the option to add it
if it is missing.
Parameters
----------
var_name : str
Data variable name.
add_if_missing : boolean
Add quality control variable if missing from object.
cleanup : boolean
Option to run qc.clean.cleanup() method on the object
to ensure the object was updated from ARM QC to the
correct standardized QC.
flag_type : boolean
Indicating the QC variable uses flag_values instead of
flag_masks.
Returns
-------
qc_var_name : str or None
Name of existing or new quality control variable. Returns
None if no existing quality control variable is found and
add_if_missing is set to False.
"""
qc_var_name = None
try:
ancillary_variables = self._obj[var_name].attrs['ancillary_variables']
if isinstance(ancillary_variables, str):
ancillary_variables = ancillary_variables.split()
for var in ancillary_variables:
for attr, value in self._obj[var].attrs.items():
if attr == 'standard_name' and 'quality_flag' in value:
qc_var_name = var
if add_if_missing and qc_var_name is None:
qc_var_name = self._obj.qcfilter.create_qc_variable(var_name, flag_type=flag_type)
except KeyError:
# Since no ancillary_variables exist look for ARM style of QC
# variable name. If it exists use it else create new
# QC variable.
if add_if_missing:
try:
self._obj['qc_' + var_name]
qc_var_name = 'qc_' + var_name
except KeyError:
qc_var_name = self._obj.qcfilter.create_qc_variable(
var_name, flag_type=flag_type
)
# Make sure the data variable has a variable attribute linking
# data variable to QC variable.
if add_if_missing:
self._obj.qcfilter.update_ancillary_variable(var_name, qc_var_name)
# Clean up quality control variables to the required standard in the
# xarray object.
if cleanup:
self._obj.clean.cleanup(handle_missing_value=True, link_qc_variables=False)
return qc_var_name
def create_qc_variable(
self, var_name, flag_type=False, flag_values_set_value=0, qc_var_name=None
):
"""
Method to create a quality control variable in the dataset.
Will try not to overwrite an existing QC variable by appending numbers
to the variable name if needed.
Parameters
----------
var_name : str
Data variable name.
flag_type : boolean
If an integer flag type should be created instead of
bitpacked mask type. Will create flag_values instead of
flag_masks.
flag_values_set_value : int
Initial flag value to use when initializing array.
qc_var_name : str
Optional new quality control variable name. If not set
will create one using \\"qc\\_\\" prepended to the data
variable name. If the name given or created is taken
will append a number that does not have a conflict.
Returns
-------
qc_var_name : str
Name of new quality control variable created.
"""
# Make QC variable long name. The variable long_name attribute
# may not exist so catch that error and set to default.
try:
qc_variable_long_name = (
'Quality check results on field: ' + self._obj[var_name].attrs['long_name']
)
except KeyError:
qc_variable_long_name = 'Quality check results for ' + var_name
# Make a new quality control variable name. Check if exists in the
# dataset. If so loop through creation of new name until one is
# found that will not replace existing variable.
if qc_var_name is None:
qc_var_name = 'qc_' + var_name
variable_names = list(self._obj.data_vars)
if qc_var_name in variable_names:
for ii in range(1, 100):
temp_qc_var_name = '_'.join([qc_var_name, str(ii)])
if temp_qc_var_name not in variable_names:
qc_var_name = temp_qc_var_name
break
# Create the QC variable filled with 0 values matching the
# shape of data variable.
try:
qc_data = dask.array.from_array(
np.zeros_like(self._obj[var_name].values, dtype=np.int32),
chunks=self._obj[var_name].data.chunksize,
)
except AttributeError:
qc_data = np.zeros_like(self._obj[var_name].values, dtype=np.int32)
# Updating to use coords instead of dim, which caused a loss of
# attributes as noted in Issue 347
self._obj[qc_var_name] = xr.DataArray(
data=qc_data,
coords=self._obj[var_name].coords,
attrs={'long_name': qc_variable_long_name, 'units': '1'},
)
# Update if using flag_values and don't want 0 to be default value.
if flag_type and flag_values_set_value != 0:
self._obj[qc_var_name].values = self._obj[qc_var_name].values + int(
flag_values_set_value
)
# Add required variable attributes.
if flag_type:
self._obj[qc_var_name].attrs['flag_values'] = []
else:
self._obj[qc_var_name].attrs['flag_masks'] = []
self._obj[qc_var_name].attrs['flag_meanings'] = []
self._obj[qc_var_name].attrs['flag_assessments'] = []
self._obj[qc_var_name].attrs['standard_name'] = 'quality_flag'
self.update_ancillary_variable(var_name, qc_var_name=qc_var_name)
return qc_var_name
def update_ancillary_variable(self, var_name, qc_var_name=None):
"""
Method to check if ancillary_variables variable attribute
is set with quality control variable name.
Parameters
----------
var_name : str
Data variable name.
qc_var_name : str
quality control variable name. If not given will attempt
to get the name from data variable ancillary_variables
attribute.
"""
if qc_var_name is None:
qc_var_name = self._obj.qcfilter.check_for_ancillary_qc(var_name, add_if_missing=False)
if qc_var_name is None:
return
try:
ancillary_variables = self._obj[var_name].attrs['ancillary_variables']
if qc_var_name not in ancillary_variables:
ancillary_variables = ' '.join([ancillary_variables, qc_var_name])
except KeyError:
ancillary_variables = qc_var_name
self._obj[var_name].attrs['ancillary_variables'] = ancillary_variables
def add_test(
self,
var_name,
index=None,
test_number=None,
test_meaning=None,
test_assessment='Bad',
flag_value=False,
recycle=False,
):
"""
Method to add a new test/filter to a quality control variable.
Parameters
----------
var_name : str
data variable name
index : int, bool, list of int or bool, numpy array, tuple of numpy arrays
Indexes into quality control array to set the test bit.
If not set or set to None will not set the test on any
element of the quality control variable but will still
add the test to the flag_masks, flag_meanings and
flag_assessments attributes.
test_number : int
Test number to use. If keyword is not set will use first
available test bit/test number.
recycle : boolean
Option to use a number less than the next highest test if available. For example,
if tests 1, 2, 4, 5 are set and recycle is True, the next test chosen will be 3;
otherwise it will be 6.
test_meaning : str
String describing the test. Will be added to flag_meanings
variable attribute.
test_assessment : str
String describing the test assessment. If not set will use
"Bad" as the string to append to flag_assessments. Will
update to be lower case and then capitalized.
flag_value : boolean
Switch to use flag_values integer quality control.
Returns
-------
test_dict : dict
A dictionary containing information added to the QC
variable.
Examples
--------
> result = ds_object.qcfilter.add_test(
var_name, test_meaning='Birds!')
"""
test_dict = {}
if test_meaning is None:
raise ValueError(
'You need to provide a value for test_meaning '
'keyword when calling the add_test method'
)
# This ensures the indexing will work even if given float values.
# Preserves tuples from np.where() or boolean arrays for standard
# python indexing.
if index is not None and not isinstance(index, (np.ndarray, tuple)):
index = np.array(index)
if index.dtype.kind not in np.typecodes['AllInteger']:
index = index.astype(int)
# Ensure assessment is lowercase and capitalized to be consistent
test_assessment = test_assessment.lower().capitalize()
qc_var_name = self._obj.qcfilter.check_for_ancillary_qc(var_name, flag_type=flag_value)
if test_number is None:
test_number = self._obj.qcfilter.available_bit(qc_var_name, recycle=recycle)
self._obj.qcfilter.set_test(var_name, index, test_number, flag_value)
if flag_value:
try:
self._obj[qc_var_name].attrs['flag_values'].append(test_number)
except KeyError:
self._obj[qc_var_name].attrs['flag_values'] = [test_number]
else:
# Determine if flag_masks test number is too large for current data type.
# If so up convert data type.
flag_masks = np.array(self._obj[qc_var_name].attrs['flag_masks'])
mask_dtype = flag_masks.dtype
if not np.issubdtype(mask_dtype, np.integer):
mask_dtype = np.uint32
if np.iinfo(mask_dtype).max - set_bit(0, test_number) <= -1:
if mask_dtype == np.int8 or mask_dtype == np.uint8:
mask_dtype = np.uint16
elif mask_dtype == np.int16 or mask_dtype == np.uint16:
mask_dtype = np.uint32
elif mask_dtype == np.int32 or mask_dtype == np.uint32:
mask_dtype = np.uint64
flag_masks = flag_masks.astype(mask_dtype)
flag_masks = np.append(flag_masks, np.array(set_bit(0, test_number), dtype=mask_dtype))
self._obj[qc_var_name].attrs['flag_masks'] = list(flag_masks)
try:
self._obj[qc_var_name].attrs['flag_meanings'].append(test_meaning)
except KeyError:
self._obj[qc_var_name].attrs['flag_meanings'] = [test_meaning]
try:
self._obj[qc_var_name].attrs['flag_assessments'].append(test_assessment)
except KeyError:
self._obj[qc_var_name].attrs['flag_assessments'] = [test_assessment]
test_dict['test_number'] = test_number
test_dict['test_meaning'] = test_meaning
test_dict['test_assessment'] = test_assessment
test_dict['qc_variable_name'] = qc_var_name
test_dict['variable_name'] = var_name
return test_dict
def remove_test(
self,
var_name=None,
test_number=None,
qc_var_name=None,
flag_value=False,
flag_values_reset_value=0,
):
"""
Method to remove a test/filter from a quality control variable. Must set
var_name or qc_var_name.
Parameters
----------
var_name : str or None
Data variable name.
test_number : int
Test number to remove.
qc_var_name : str or None
Quality control variable name. Ignored if var_name is set.
flag_value : boolean
Switch to use flag_values integer quality control.
flag_values_reset_value : int
Value to use when resetting a flag_values value to not be set.
Examples
--------
> ds_object.qcfilter.remove_test(
var_name, test_number=3)
"""
if test_number is None:
raise ValueError(
'You need to provide a value for test_number '
'keyword when calling the remove_test() method'
)
if var_name is None and qc_var_name is None:
raise ValueError(
'You need to provide a value for var_name or qc_var_name '
'keyword when calling the remove_test() method'
)
if var_name is not None:
qc_var_name = self._obj.qcfilter.check_for_ancillary_qc(var_name)
# Determine which index is using the test number
index = None
if flag_value:
flag_values = self._obj[qc_var_name].attrs['flag_values']
for ii, flag_num in enumerate(flag_values):
if flag_num == test_number:
index = ii
break
else:
flag_masks = self._obj[qc_var_name].attrs['flag_masks']
for ii, bit_num in enumerate(flag_masks):
if parse_bit(bit_num)[0] == test_number:
index = ii
break
# If can't find the index of test return before doing anything.
if index is None:
return
if flag_value:
remove_index = self._obj.qcfilter.get_qc_test_mask(
var_name=var_name,
qc_var_name=qc_var_name,
test_number=test_number,
return_index=True,
flag_value=True,
)
self._obj.qcfilter.unset_test(
var_name=var_name,
qc_var_name=qc_var_name,
index=remove_index,
test_number=test_number,
flag_value=flag_value,
flag_values_reset_value=flag_values_reset_value,
)
del flag_values[index]
self._obj[qc_var_name].attrs['flag_values'] = flag_values
else:
remove_index = self._obj.qcfilter.get_qc_test_mask(
var_name=var_name,
qc_var_name=qc_var_name,
test_number=test_number,
return_index=True,
)
self._obj.qcfilter.unset_test(
var_name=var_name,
qc_var_name=qc_var_name,
index=remove_index,
test_number=test_number,
flag_value=flag_value,
)
del flag_masks[index]
self._obj[qc_var_name].attrs['flag_masks'] = flag_masks
flag_meanings = self._obj[qc_var_name].attrs['flag_meanings']
del flag_meanings[index]
self._obj[qc_var_name].attrs['flag_meanings'] = flag_meanings
flag_assessments = self._obj[qc_var_name].attrs['flag_assessments']
del flag_assessments[index]
self._obj[qc_var_name].attrs['flag_assessments'] = flag_assessments
def set_test(self, var_name, index=None, test_number=None, flag_value=False):
"""
Method to set a test/filter in a quality control variable.
Parameters
----------
var_name : str
Data variable name.
index : int or list or numpy array
Index to set test in quality control array. If want to
unset all values will need to pass in index of all values.
test_number : int
Test number to set.
flag_value : boolean
Switch to use flag_values integer quality control.
Examples
--------
.. code-block:: python
index = [0, 1, 2, 30]
ds_object.qcfilter.set_test(var_name, index=index, test_number=2)
"""
qc_var_name = self._obj.qcfilter.check_for_ancillary_qc(var_name)
qc_variable = np.array(self._obj[qc_var_name].values)
# Ensure the qc_variable data type is integer. This ensures bitwise comparison
# will not cause an error.
if qc_variable.dtype.kind not in np.typecodes['AllInteger']:
qc_variable = qc_variable.astype(int)
# Determine if test number is too large for current data type. If so
# up convert data type.
dtype = qc_variable.dtype
if np.iinfo(dtype).max - set_bit(0, test_number) < -1:
if dtype == np.int8:
dtype = np.int16
elif dtype == np.int16:
dtype = np.int32
elif dtype == np.int32:
dtype = np.int64
qc_variable = qc_variable.astype(dtype)
if index is not None:
if flag_value:
qc_variable[index] = test_number
else:
qc_variable[index] = set_bit(qc_variable[index], test_number)
self._obj[qc_var_name].values = qc_variable
def unset_test(
self,
var_name=None,
qc_var_name=None,
index=None,
test_number=None,
flag_value=False,
flag_values_reset_value=0,
):
"""
Method to unset a test/filter from a quality control variable.
Parameters
----------
var_name : str or None
Data variable name.
qc_var_name : str or None
Quality control variable name. Ignored if var_name is set.
index : int or list or numpy array
Index to unset test in quality control array. If want to
unset all values will need to pass in index of all values.
test_number : int
Test number to remove.
flag_value : boolean
Switch to use flag_values integer quality control.
flag_values_reset_value : int
Value to use when resetting a flag_values value to not be set.
Examples
--------
.. code-block:: python
ds_object.qcfilter.unset_test(var_name, index=0, test_number=2)
"""
if index is None:
return
if var_name is None and qc_var_name is None:
raise ValueError(
'You need to provide a value for var_name or qc_var_name '
'keyword when calling the unset_test() method'
)
if var_name is not None:
qc_var_name = self._obj.qcfilter.check_for_ancillary_qc(var_name)
# Get QC variable
qc_variable = self._obj[qc_var_name].values
# Ensure the qc_variable data type is integer. This ensures bitwise comparison
# will not cause an error.
if qc_variable.dtype.kind not in np.typecodes['AllInteger']:
qc_variable = qc_variable.astype(int)
if flag_value:
qc_variable[index] = flag_values_reset_value
else:
qc_variable[index] = unset_bit(qc_variable[index], test_number)
self._obj[qc_var_name].values = qc_variable
def available_bit(self, qc_var_name, recycle=False):
"""
Method to determine next available bit or flag to use with a QC test.
Will check for flag_masks first and if not found will check for
flag_values. This will drive how the next value is chosen.
Parameters
----------
qc_var_name : str
Quality control variable name.
recycle : boolean
Option to look for a bit (test) not in use starting from 1.
If a test is not defined will return the lowest number, else
will just use next highest number.
Returns
-------
test_num : int
Next available test number.
"""
try:
flag_masks = self._obj[qc_var_name].attrs['flag_masks']
flag_value = False
except KeyError:
try:
flag_masks = self._obj[qc_var_name].attrs['flag_values']
flag_value = True
except KeyError:
try:
self._obj[qc_var_name].attrs['flag_values']
flag_masks = self._obj[qc_var_name].attrs['flag_masks']
flag_value = False
except KeyError:
raise ValueError(
'Problem getting next value from '
'available_bit(). flag_values and '
'flag_masks not set as expected'
)
if flag_masks == []:
next_bit = 1
else:
if flag_value:
if recycle:
next_bit = min(set(range(1, 100000)) - set(flag_masks))
else:
next_bit = max(flag_masks) + 1
else:
if recycle:
tests = [parse_bit(mask)[0] for mask in flag_masks]
next_bit = min(set(range(1, 63)) - set(tests))
else:
next_bit = parse_bit(max(flag_masks))[0] + 1
return int(next_bit)
def get_qc_test_mask(
self,
var_name=None,
test_number=None,
qc_var_name=None,
flag_value=False,
return_index=False,
):
"""
Returns a numpy array of False or True where a particular
flag or bit is set in a numpy array. Must set var_name or qc_var_name
when calling.
Parameters
----------
var_name : str or None
Data variable name.
test_number : int
Test number to return array where test is set.
qc_var_name : str or None
Quality control variable name. Ignored if var_name is set.
flag_value : boolean
Switch to use flag_values integer quality control.
return_index : boolean
Return a numpy array of index numbers into QC array where the
test is set instead of 0 or 1 mask.
Returns
-------
test_mask : bool array
A numpy boolean array with False or True where the test number or
bit was set.
Examples
--------
.. code-block:: python
from act.io.armfiles import read_netcdf
from act.tests import EXAMPLE_IRT25m20s
ds_object = read_netcdf(EXAMPLE_IRT25m20s)
var_name = "inst_up_long_dome_resist"
result = ds_object.qcfilter.add_test(
var_name, index=[0, 1, 2], test_meaning="Birds!"
)
qc_var_name = result["qc_variable_name"]
mask = ds_object.qcfilter.get_qc_test_mask(
var_name, result["test_number"], return_index=True
)
print(mask)
array([0, 1, 2])
mask = ds_object.qcfilter.get_qc_test_mask(var_name, result["test_number"])
print(mask)
array([True, True, True, ..., False, False, False])
data = ds_object[var_name].values
print(data[mask])
array([7.84, 7.8777, 7.8965], dtype=float32)
import numpy as np
data[mask] = np.nan
print(data)
array([nan, nan, nan, ..., 7.6705, 7.6892, 7.6892], dtype=float32)
"""
if var_name is None and qc_var_name is None:
raise ValueError(
'You need to provide a value for var_name or qc_var_name '
'keyword when calling the get_qc_test_mask() method'
)
if test_number is None:
raise ValueError(
'You need to provide a value for test_number '
'keyword when calling the get_qc_test_mask() method'
)
if var_name is not None:
qc_var_name = self._obj.qcfilter.check_for_ancillary_qc(var_name)
qc_variable = self._obj[qc_var_name].values
# Ensure the qc_variable data type is integer. This ensures bitwise comparison
# will not cause an error.
if qc_variable.dtype.kind not in np.typecodes['AllInteger']:
qc_variable = qc_variable.astype(int)
if flag_value:
tripped = qc_variable == test_number
else:
check_bit = set_bit(0, test_number) & qc_variable
tripped = check_bit > 0
test_mask = np.full(qc_variable.shape, False, dtype='bool')
# Make sure test_mask is an array. If qc_variable is a scalar it will
# be returned from np.full() as a scalar.
test_mask = np.atleast_1d(test_mask)
test_mask[tripped] = True
test_mask = np.ma.make_mask(test_mask, shrink=False)
if return_index:
test_mask = np.where(test_mask)[0]
return test_mask
def get_masked_data(
self,
var_name,
rm_assessments=None,
rm_tests=None,
return_nan_array=False,
ma_fill_value=None,
return_inverse=False,
):
"""
Returns a numpy masked array containing data and mask or
a numpy float array with masked values set to NaN.
Parameters
----------
var_name : str
Data variable name.
rm_assessments : str or list of str
Assessment name to exclude from returned data.
rm_tests : int or list of int
Test numbers to exclude from returned data. This is the test
number (or bit position number) not the mask number.
return_nan_array : boolean
Return a numpy array with filtered (or masked) values
set to the numpy NaN value. If the data is an integer type it will be
upconverted to numpy float to allow setting the NaN value.
ma_fill_value : int or float (or str?)
The numpy masked array fill_value used in creation of the
masked array. If the datatype needs to be upconverted to allow
the fill value to be used, data will be upconverted.
return_inverse : boolean
Invert the masked array mask, or return the data array where the mask is
set to False instead of True (with values set to NaN). Useful for
overplotting where the data is failing.
Returns
-------
variable : numpy masked array or numpy float array
Default is to return a numpy masked array with the mask set to
True where the test with requested assessment or test number
was found set.
If return_nan_array is True will return numpy array upconverted
to float with locations where the test with requested assessment
or test number was found set converted to NaN.
Examples
--------
.. code-block:: python
from act.io.armfiles import read_netcdf
from act.tests import EXAMPLE_IRT25m20s
ds_object = read_netcdf(EXAMPLE_IRT25m20s)
var_name = "inst_up_long_dome_resist"
result = ds_object.qcfilter.add_test(
var_name, index=[0, 1, 2], test_meaning="Birds!"
)
data = ds_object.qcfilter.get_masked_data(
var_name, rm_assessments=["Bad", "Indeterminate"]
)
print(data)
masked_array(
data=[..., 7.670499801635742, 7.689199924468994, 7.689199924468994],
mask=[..., False, False, False],
fill_value=1e20,
dtype=float32,
)
"""
qc_var_name = self._obj.qcfilter.check_for_ancillary_qc(var_name, add_if_missing=False)
flag_value = False
flag_values = None
flag_masks = None
flag_assessments = None
try:
flag_assessments = self._obj[qc_var_name].attrs['flag_assessments']
flag_masks = self._obj[qc_var_name].attrs['flag_masks']
except KeyError:
pass
try:
flag_values = self._obj[qc_var_name].attrs['flag_values']
flag_value = True
except KeyError:
pass
test_numbers = []
if rm_tests is not None:
if isinstance(rm_tests, (int, float, str)):
rm_tests = [int(rm_tests)]
test_numbers.extend(rm_tests)
if rm_assessments is not None:
if isinstance(rm_assessments, str):
rm_assessments = [rm_assessments]
if flag_masks is not None:
test_nums = [parse_bit(mask)[0] for mask in flag_masks]
if flag_values is not None:
test_nums = flag_values
rm_assessments = [x.lower() for x in rm_assessments]
if flag_assessments is not None:
for ii, assessment in enumerate(flag_assessments):
if assessment.lower() in rm_assessments:
test_numbers.append(test_nums[ii])
# Make the list of test numbers to mask unique
test_numbers = list(set(test_numbers))
# Create mask of indexes by looking where each test is set
variable = self._obj[var_name].values
nan_dtype = np.float32
if variable.dtype in (np.float64, np.int64):
nan_dtype = np.float64
mask = np.zeros(variable.shape, dtype=bool)
for test in test_numbers:
mask = mask | self._obj.qcfilter.get_qc_test_mask(var_name, test, flag_value=flag_value)
# Convert data numpy array into masked array
try:
variable = np.ma.array(variable, mask=mask, fill_value=ma_fill_value)
except TypeError:
variable = np.ma.array(
variable,
mask=mask,
fill_value=ma_fill_value,
dtype=np.array(ma_fill_value).dtype,
)
# If requested switch array from where data is not failing tests
# to where data is failing tests. This can be used when over plotting
# where the data is failing the tests.
if return_inverse:
mask = variable.mask
mask = np.invert(mask)
variable.mask = mask
# If asked to return numpy array with values set to NaN
if return_nan_array:
variable = variable.astype(nan_dtype)
variable = variable.filled(fill_value=np.nan)
return variable
def datafilter(
self,
variables=None,
rm_assessments=None,
rm_tests=None,
np_ma=True,
verbose=False,
del_qc_var=True,
):
"""
Method to apply quality control variables to data variables by
changing the data values in the dataset using quality control variables.
The data variable is changed to a numpy masked array with failing
data masked or, if requested, to numpy array with failing data set to
NaN. This can be used to update the data variable in the xarray
dataset for use with xarray methods to perform analysis on the data
since those methods don't read the quality control variables.
Parameters
----------
variables : None or str or list of str
Data variable names to process
rm_assessments : str or list of str
Assessment names listed under quality control variable flag_assessments
to exclude from returned data. Examples include
['Bad', 'Incorrect', 'Indeterminate', 'Suspect']
rm_tests : int or list of int
Test numbers listed under quality control variable to exclude from
returned data. This is the test
number (or bit position number) not the mask number.
np_ma : boolean
Should the data in the xarray DataArray be set to numpy masked
arrays. This should work with most xarray methods. If the xarray
processing method does not work with numpy masked array set to
False to use NaN.
verbose : boolean
Print processing information.
del_qc_var : boolean
Option to delete the quality control variable after processing. Since
the data values can not be determined after they are set to NaN
and xarray method processing would also process the quality control
variables, the default is to remove the quality control data
variables. If numpy masked arrays are used the data are not lost
but would need to be extracted and set to DataArray to return the
dataset back to original state.
Examples
--------
.. code-block:: python
from act.io.armfiles import read_netcdf
from act.tests import EXAMPLE_MET1
ds = read_netcdf(EXAMPLE_MET1)
ds.clean.cleanup()
var_name = "atmos_pressure"
ds_1 = ds.mean()
ds.qcfilter.add_less_test(var_name, 99, test_assessment="Bad")
ds.qcfilter.datafilter(rm_assessments="Bad")
ds_2 = ds.mean()
print("All_data=", ds_1[var_name].values)
All_data = 98.86098
print("Bad_Removed=", ds_2[var_name].values)
Bad_Removed = 99.15148
"""
if variables is not None and isinstance(variables, str):
variables = [variables]
if variables is None:
variables = list(self._obj.data_vars)
for var_name in variables:
qc_var_name = self.check_for_ancillary_qc(var_name, add_if_missing=False, cleanup=False)
if qc_var_name is None:
if verbose:
print(
f'No quality control variable for {var_name} found '
f'in call to .qcfilter.datafilter()'
)
continue
data = self.get_masked_data(
var_name,
rm_assessments=rm_assessments,
rm_tests=rm_tests,
ma_fill_value=np_ma,
)
self._obj[var_name].values = data
if del_qc_var:
del self._obj[qc_var_name]
if verbose:
print(f'Deleting {qc_var_name} from dataset')
def set_bit(array, bit_number):
"""
Function to set a quality control bit given a scalar or
array of values and a bit number.
Parameters
----------
array : int or numpy array
The bitpacked array to set the bit number.
bit_number : int
The bit (or test) number to set.
Returns
-------
array : int, numpy array, tuple, list
Integer or numpy array with bit set for each element of the array.
Returned in same type.
Examples
--------
Example use setting bit 2 to an array called data:
.. code-block:: python
data = np.array(range(0, 7))
data = set_bit(data, 2)
print(data)
array([2, 3, 2, 3, 6, 7, 6])
"""
was_list = False
was_tuple = False
if isinstance(array, list):
array = np.array(array)
was_list = True
if isinstance(array, tuple):
array = np.array(array)
was_tuple = True
if bit_number > 0:
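# Test numbers are 1-based, so test/bit N corresponds to the integer value 2**(N - 1).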
array |= 1 << bit_number - 1
if was_list:
array = list(array)
if was_tuple:
array = tuple(array)
return array
def unset_bit(array, bit_number):
"""
Function to remove a quality control bit given a
scalar or array of values and a bit number.
Parameters
----------
array : int or numpy array
Array of integers containing bit packed numbers.
bit_number : int
Bit number to remove.
Returns
-------
array : int or numpy array
Returns same data type as array entered with bit removed. Will
fail gracefully if the bit requested to be removed was not set.
Examples
--------
Example use removing bit 2 from an array called data:
> data = set_bit(0,2)
> data = set_bit(data,3)
> data
6
> data = unset_bit(data,2)
> data
4
"""
was_list = False
was_tuple = False
if isinstance(array, list):
array = np.array(array)
was_list = True
if isinstance(array, tuple):
array = np.array(array)
was_tuple = True
if bit_number > 0:
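# Clear bit N by AND-ing with the complement of 2**(N - 1).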
array = array & ~(1 << bit_number - 1)
if was_list:
array = list(array)
if was_tuple:
array = tuple(array)
return array
def parse_bit(qc_bit):
"""
Given a single integer value, return bit positions.
Parameters
----------
qc_bit : int or numpy int
Bit packed integer number to be parsed.
Returns
-------
bit_number : numpy.int32 array
Array containing all bit numbers of the bit packed number.
If no bits set returns empty array.
Examples
--------
> parse_bit(7)
array([1, 2, 3])
"""
if isinstance(qc_bit, (list, tuple, np.ndarray)):
if len(qc_bit) > 1:
raise ValueError('Must be a single value.')
qc_bit = qc_bit[0]
if qc_bit < 0:
raise ValueError('Must be a positive integer.')
bit_number = []
qc_bit = int(qc_bit)
counter = 0
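# Peel bits off from least significant to most significant; counter tracks the 1-based bit (test) number.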
while qc_bit > 0:
temp_value = qc_bit % 2
qc_bit = qc_bit >> 1
counter += 1
if temp_value == 1:
bit_number.append(counter)
bit_number = np.asarray(bit_number, dtype=np.int32)
return bit_number
<|code_end|>
|
Usability Issue: plot_barbs_from_spd_dir takes in direction, speed
For example:
`TimeSeriesDisplay.plot_barbs_from_spd_dir(dir_field, spd_field, pres_field=None, dsname=None, **kwargs)`
It would make sense to change the argument order to spd_field, dir_field, or to rename the function, so that the name and the argument order are consistent.
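As a minimal sketch (reusing the rawinsonde example from the method's own docstring), the call below shows the current order; the speed-first order is shown as a commented-out, hypothetical alternative and is not the current ACT signature:

```python
import act

# Current behaviour: the name ends in "spd_dir" but direction comes first.
sonde_ds = act.io.armfiles.read_netcdf(act.tests.sample_files.EXAMPLE_TWP_SONDE_WILDCARD)
display = act.plotting.TimeSeriesDisplay({'sonde_darwin': sonde_ds}, figsize=(10, 5))
display.plot_barbs_from_spd_dir('deg', 'wspd', 'pres', num_barbs_x=20)

# Proposed order (hypothetical, not the current API): speed first, then direction,
# so the arguments match the method name.
# display.plot_barbs_from_spd_dir('wspd', 'deg', 'pres', num_barbs_x=20)
```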
|
act/plotting/timeseriesdisplay.py
<|code_start|>"""
Stores the class for TimeSeriesDisplay.
"""
import datetime as dt
import warnings
from copy import deepcopy
from re import search, search as re_search
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from matplotlib import colors as mplcolors
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scipy.interpolate import NearestNDInterpolator
from ..qc.qcfilter import parse_bit
from ..utils import data_utils, datetime_utils as dt_utils
from ..utils.datetime_utils import determine_time_delta, reduce_time_ranges
from ..utils.geo_utils import get_sunrise_sunset_noon
from . import common
from .plot import Display
class TimeSeriesDisplay(Display):
"""
This subclass contains routines that are specific to plotting
time series plots from data. It is inherited from Display and therefore
contains all of Display's attributes and methods.
Examples
--------
To create a TimeSeriesDisplay with 3 rows, simply do:
.. code-block:: python
ds = act.read_netcdf(the_file)
disp = act.plotting.TimeSeriesDisplay(ds, subplot_shape=(3,), figsize=(15, 5))
The TimeSeriesDisplay constructor takes in the same keyword arguments as
plt.subplots. For more information on the plt.subplots keyword arguments,
see the `matplotlib documentation
<https://matplotlib.org/api/_as_gen/matplotlib.pyplot.subplots.html>`_.
If no subplot_shape is provided, then no figure or axis will be created
until add_subplots or plots is called.
"""
def __init__(self, obj, subplot_shape=(1,), ds_name=None, **kwargs):
super().__init__(obj, subplot_shape, ds_name, **kwargs)
def day_night_background(self, dsname=None, subplot_index=(0,)):
"""
Colorcodes the background according to sunrise/sunset.
Parameters
----------
dsname : None or str
If there is more than one datastream in the display object the
name of the datastream needs to be specified. If set to None and
there is only one datastream then ACT will use the sole datastream
in the object.
subplot_index : 1 or 2D tuple, list, or array
The index to the subplot to place the day and night background in.
"""
if dsname is None and len(self._obj.keys()) > 1:
raise ValueError(
'You must choose a datastream to derive the '
+ 'information needed for the day and night '
+ 'background when 2 or more datasets are in '
+ 'the display object.'
)
elif dsname is None:
dsname = list(self._obj.keys())[0]
# Get File Dates
try:
file_dates = self._obj[dsname].attrs['_file_dates']
except KeyError:
file_dates = []
if len(file_dates) == 0:
sdate = dt_utils.numpy_to_arm_date(self._obj[dsname].time.values[0])
edate = dt_utils.numpy_to_arm_date(self._obj[dsname].time.values[-1])
file_dates = [sdate, edate]
all_dates = dt_utils.dates_between(file_dates[0], file_dates[-1])
if self.axes is None:
raise RuntimeError('day_night_background requires the plot to ' 'be displayed.')
ax = self.axes[subplot_index]
# Find variable names for latitude and longitude
variables = list(self._obj[dsname].data_vars)
lat_name = [var for var in ['lat', 'latitude'] if var in variables]
lon_name = [var for var in ['lon', 'longitude'] if var in variables]
if len(lat_name) == 0:
lat_name = None
else:
lat_name = lat_name[0]
if len(lon_name) == 0:
lon_name = None
else:
lon_name = lon_name[0]
# Variable name does not match, look for standard_name declaration
if lat_name is None or lon_name is None:
for var in variables:
try:
if self._obj[dsname][var].attrs['standard_name'] == 'latitude':
lat_name = var
except KeyError:
pass
try:
if self._obj[dsname][var].attrs['standard_name'] == 'longitude':
lon_name = var
except KeyError:
pass
if lat_name is not None and lon_name is not None:
break
if lat_name is None or lon_name is None:
return
try:
if self._obj[dsname][lat_name].data.size > 1:
# Look for non-NaN values to use for location. If not found use first value.
lat = self._obj[dsname][lat_name].values
index = np.where(np.isfinite(lat))[0]
if index.size == 0:
index = [0]
lat = float(lat[index[0]])
# Look for non-NaN values to use for location. If not found use first value.
lon = self._obj[dsname][lon_name].values
index = np.where(np.isfinite(lon))[0]
if index.size == 0:
index = [0]
lon = float(lon[index[0]])
else:
lat = float(self._obj[dsname][lat_name].values)
lon = float(self._obj[dsname][lon_name].values)
except AttributeError:
return
if not np.isfinite(lat):
warnings.warn(f"Latitude value in dataset of '{lat}' is not finite. ", RuntimeWarning)
return
if not np.isfinite(lon):
warnings.warn(f"Longitude value in dataset of '{lon}' is not finite. ", RuntimeWarning)
return
lat_range = [-90, 90]
if not (lat_range[0] <= lat <= lat_range[1]):
warnings.warn(
f"Latitude value in dataset of '{lat}' not within acceptable "
f'range of {lat_range[0]} <= latitude <= {lat_range[1]}. ',
RuntimeWarning,
)
return
lon_range = [-180, 180]
if not (lon_range[0] <= lon <= lon_range[1]):
warnings.warn(
f"Longitude value in dataset of '{lon}' not within acceptable "
f'range of {lon_range[0]} <= longitude <= {lon_range[1]}. ',
RuntimeWarning,
)
return
# initialize the plot to a gray background for total darkness
rect = ax.patch
rect.set_facecolor('0.85')
# Get date ranges to plot
plot_dates = []
for f in all_dates:
for ii in [-1, 0, 1]:
plot_dates.append(f + dt.timedelta(days=ii))
# Get sunrise, sunset and noon times
sunrise, sunset, noon = get_sunrise_sunset_noon(lat, lon, plot_dates)
# Plot daylight
for ii in range(0, len(sunrise)):
ax.axvspan(sunrise[ii], sunset[ii], facecolor='#FFFFCC', zorder=0)
# Plot noon line
for ii in noon:
ax.axvline(x=ii, linestyle='--', color='y', zorder=1)
def set_xrng(self, xrng, subplot_index=(0,)):
"""
Sets the x range of the plot.
Parameters
----------
xrng : 2 number array
The x limits of the plot.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
"""
if self.axes is None:
raise RuntimeError('set_xrng requires the plot to be displayed.')
if not hasattr(self, 'xrng') and len(self.axes.shape) == 2:
self.xrng = np.zeros((self.axes.shape[0], self.axes.shape[1], 2), dtype='datetime64[D]')
elif not hasattr(self, 'xrng') and len(self.axes.shape) == 1:
self.xrng = np.zeros((self.axes.shape[0], 2), dtype='datetime64[D]')
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=UserWarning)
self.axes[subplot_index].set_xlim(xrng)
self.xrng[subplot_index, :] = np.array(xrng, dtype='datetime64[D]')
def set_yrng(self, yrng, subplot_index=(0,)):
"""
Sets the y range of the plot.
Parameters
----------
yrng : 2 number array
The y limits of the plot.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
"""
if self.axes is None:
raise RuntimeError('set_yrng requires the plot to be displayed.')
if not hasattr(self, 'yrng') and len(self.axes.shape) == 2:
self.yrng = np.zeros((self.axes.shape[0], self.axes.shape[1], 2))
elif not hasattr(self, 'yrng') and len(self.axes.shape) == 1:
self.yrng = np.zeros((self.axes.shape[0], 2))
if yrng[0] == yrng[1]:
yrng[1] = yrng[1] + 1
self.axes[subplot_index].set_ylim(yrng)
self.yrng[subplot_index, :] = yrng
def plot(
self,
field,
dsname=None,
subplot_index=(0,),
cmap=None,
set_title=None,
add_nan=False,
day_night_background=False,
invert_y_axis=False,
abs_limits=(None, None),
time_rng=None,
y_rng=None,
use_var_for_y=None,
set_shading='auto',
assessment_overplot=False,
overplot_marker='.',
overplot_behind=False,
overplot_markersize=6,
assessment_overplot_category={
'Incorrect': ['Bad', 'Incorrect'],
'Suspect': ['Indeterminate', 'Suspect'],
},
assessment_overplot_category_color={'Incorrect': 'red', 'Suspect': 'orange'},
force_line_plot=False,
labels=False,
cbar_label=None,
secondary_y=False,
**kwargs,
):
"""
Makes a timeseries plot. If subplots have not been added yet, an axis
will be created assuming that there is only going to be one plot.
Parameters
----------
field : str
The name of the field to plot.
dsname : None or str
If there is more than one datastream in the display object the
name of the datastream needs to be specified. If set to None and
there is only one datastream ACT will use the sole datastream
in the object.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
cmap : matplotlib colormap
The colormap to use.
set_title : str
The title for the plot.
add_nan : bool
Set to True to fill in data gaps with NaNs.
day_night_background : bool
Set to True to fill in a color coded background
according to the time of day.
abs_limits : tuple or list
Sets the bounds on plot limits even if data values exceed
those limits. Set to (ymin,ymax). Use None if only setting
minimum or maximum limit, i.e. (22., None).
time_rng : tuple or list
List or tuple with (min, max) values to set the x-axis range
limits.
y_rng : tuple or list
List or tuple with (min, max) values to set the y-axis range limits.
use_var_for_y : str
Set this to the name of a data variable in the Dataset to use as
the y-axis variable instead of the default dimension. Useful for
instances where data has an index-based dimension instead of a
height-based dimension. If shapes of arrays do not match it will
automatically revert to the original ydata.
set_shading : string
Option to set the matplotlib.pcolormesh shading parameter.
Defaults to 'auto'.
assessment_overplot : boolean
Option to overplot quality control colored symbols over plotted
data using flag_assessment categories.
overplot_marker : str
Marker to use for overplot symbol.
overplot_behind : bool
Place the overplot marker behind the data point.
overplot_markersize : float or int
Size of overplot marker. If overplot_behind or force_line_plot
are set the marker size will be double overplot_markersize so
the color is visible.
assessment_overplot_category : dict
Lookup to categorize assessments into groups. This allows using
multiple terms for the same quality control level of failure.
Also allows adding more to the defaults.
assessment_overplot_category_color : dict
Lookup to match overplot category color to assessment grouping.
force_line_plot : boolean
Option to plot 2D data as 1D line plots.
labels : boolean or list
Option to overwrite the legend labels. Must have same dimensions as
number of lines plotted.
cbar_label : str
Option to overwrite default colorbar label.
secondary_y : boolean
Option to plot on secondary y axis.
**kwargs : keyword arguments
The keyword arguments for :func:`plt.plot` (1D timeseries) or
:func:`plt.pcolormesh` (2D timeseries).
Returns
-------
ax : matplotlib axis handle
The matplotlib axis handle of the plot.
"""
if dsname is None and len(self._obj.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._obj.keys())[0]
# Get data and dimensions
data = self._obj[dsname][field]
dim = list(self._obj[dsname][field].dims)
xdata = self._obj[dsname][dim[0]]
if 'units' in data.attrs:
ytitle = ''.join(['(', data.attrs['units'], ')'])
else:
ytitle = field
if cbar_label is None:
cbar_default = ytitle
if len(dim) > 1:
if use_var_for_y is None:
ydata = self._obj[dsname][dim[1]]
else:
ydata = self._obj[dsname][use_var_for_y]
ydata_dim1 = self._obj[dsname][dim[1]]
if np.shape(ydata) != np.shape(ydata_dim1):
ydata = ydata_dim1
units = ytitle
if 'units' in ydata.attrs.keys():
units = ydata.attrs['units']
ytitle = ''.join(['(', units, ')'])
else:
units = ''
ytitle = dim[1]
# Create labels if 2d as 1d
if force_line_plot is True:
if labels is True:
labels = [' '.join([str(d), units]) for d in ydata.values]
ytitle = f"({data.attrs['units']})"
ydata = None
else:
ydata = None
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
# Set up secondary y axis if requested
if secondary_y is False:
ax = self.axes[subplot_index]
else:
ax = self.axes[subplot_index].twinx()
if ydata is None:
if day_night_background is True:
self.day_night_background(subplot_index=subplot_index, dsname=dsname)
# If limiting data being plotted use masked arrays
# Need to do it this way because of autoscale() method
if abs_limits[0] is not None and abs_limits[1] is not None:
data = np.ma.masked_outside(data, abs_limits[0], abs_limits[1])
elif abs_limits[0] is not None and abs_limits[1] is None:
data = np.ma.masked_less_equal(data, abs_limits[0])
elif abs_limits[0] is None and abs_limits[1] is not None:
data = np.ma.masked_greater_equal(data, abs_limits[1])
# Plot the data
if 'marker' not in kwargs.keys():
kwargs['marker'] = '.'
lines = ax.plot(xdata, data, **kwargs)
# Check if we need to call legend method after plotting. This is only
# called when no assessment overplot is called.
add_legend = False
if 'label' in kwargs.keys():
add_legend = True
# Overplot failing data if requested
if assessment_overplot:
# If we are doing a forced line plot from 2D data we need to manage
# legend labels. Make arrays to hold labels of failing QC
# because they are not set when labels is not set.
if not isinstance(labels, list) and add_legend is False:
labels = []
lines = []
# For forced line plot need to plot QC behind point instead of
# on top of point.
zorder = None
if force_line_plot or overplot_behind:
zorder = 0
overplot_markersize *= 2.0
for assessment, categories in assessment_overplot_category.items():
flag_data = self._obj[dsname].qcfilter.get_masked_data(
field, rm_assessments=categories, return_inverse=True
)
if np.invert(flag_data.mask).any() and np.isfinite(flag_data).any():
try:
flag_data.mask = np.logical_or(data.mask, flag_data.mask)
except AttributeError:
pass
qc_ax = ax.plot(
xdata,
flag_data,
marker=overplot_marker,
linestyle='',
markersize=overplot_markersize,
color=assessment_overplot_category_color[assessment],
label=assessment,
zorder=zorder,
)
# If labels keyword is set need to add labels for calling legend
if isinstance(labels, list):
# If plotting forced_line_plot need to subset the Line2D object
# so we don't have more than one added to legend.
if len(qc_ax) > 1:
lines.extend(qc_ax[:1])
else:
lines.extend(qc_ax)
labels.append(assessment)
add_legend = True
# Add legend if labels are available
if isinstance(labels, list):
ax.legend(lines, labels)
elif add_legend:
ax.legend()
else:
# Add in nans to ensure the data are not streaking
if add_nan is True:
xdata, data = data_utils.add_in_nan(xdata, data)
# Sets shading parameter to auto. Matplotlib will check dimensions.
# If X, Y and C have the same dimensions shading is set to nearest.
# If X and Y dimensions are 1 greater than C shading is set to flat.
if 'edgecolors' not in kwargs.keys():
kwargs['edgecolors'] = 'face'
mesh = ax.pcolormesh(
np.asarray(xdata),
ydata,
data.transpose(),
shading=set_shading,
cmap=cmap,
**kwargs,
)
# Set Title
if set_title is None:
if isinstance(self._obj[dsname].time.values[0], np.datetime64):
set_title = ' '.join(
[
dsname,
field,
'on',
dt_utils.numpy_to_arm_date(self._obj[dsname].time.values[0]),
]
)
else:
date_result = search(
r'\d{4}-\d{1,2}-\d{1,2}', self._obj[dsname].time.attrs['units']
)
if date_result is not None:
set_title = ' '.join([dsname, field, 'on', date_result.group(0)])
else:
set_title = ' '.join([dsname, field])
if secondary_y is False:
ax.set_title(set_title)
# Set YTitle
ax.set_ylabel(ytitle)
# Set X Limit - We want the same time axes for all subplots
if not hasattr(self, 'time_rng'):
if time_rng is not None:
self.time_rng = list(time_rng)
else:
self.time_rng = [xdata.min().values, xdata.max().values]
self.set_xrng(self.time_rng, subplot_index)
# Set Y Limit
if y_rng is not None:
self.set_yrng(y_rng)
if hasattr(self, 'yrng'):
# Make sure that the yrng is not just the default
if ydata is None:
if abs_limits[0] is not None or abs_limits[1] is not None:
our_data = data
else:
our_data = data.values
else:
our_data = ydata
finite = np.isfinite(our_data)
if finite.any():
our_data = our_data[finite]
if invert_y_axis is False:
yrng = [np.min(our_data), np.max(our_data)]
else:
yrng = [np.max(our_data), np.min(our_data)]
else:
yrng = [0, 1]
# Check if current range is outside of new range and only set
# values that work for all data plotted.
if isinstance(yrng[0], np.datetime64):
yrng = mdates.datestr2num([str(yrng[0]), str(yrng[1])])
current_yrng = ax.get_ylim()
if yrng[0] > current_yrng[0]:
yrng[0] = current_yrng[0]
if yrng[1] < current_yrng[1]:
yrng[1] = current_yrng[1]
# Set y range the normal way if not secondary y
# If secondary, just use set_ylim
if secondary_y is False:
self.set_yrng(yrng, subplot_index)
else:
ax.set_ylim(yrng)
# Set X Format
if len(subplot_index) == 1:
days = self.xrng[subplot_index, 1] - self.xrng[subplot_index, 0]
else:
days = (
self.xrng[subplot_index[0], subplot_index[1], 1]
- self.xrng[subplot_index[0], subplot_index[1], 0]
)
myFmt = common.get_date_format(days)
ax.xaxis.set_major_formatter(myFmt)
# Set X format - We want the same time axes for all subplots
if not hasattr(self, 'time_fmt'):
self.time_fmt = myFmt
# Put on an xlabel, but only if we are making the bottom-most plot
if subplot_index[0] == self.axes.shape[0] - 1:
ax.set_xlabel('Time [UTC]')
if ydata is not None:
if cbar_label is None:
self.add_colorbar(mesh, title=cbar_default, subplot_index=subplot_index)
else:
self.add_colorbar(
mesh,
title=''.join(['(', cbar_label, ')']),
subplot_index=subplot_index,
)
return ax
def plot_barbs_from_spd_dir(self, dir_field, spd_field, pres_field=None, dsname=None, **kwargs):
"""
This procedure will make a wind barb plot timeseries.
If a pressure field is given and the wind fields are 1D, which, for
example, would occur if one wants to plot a timeseries of
rawinsonde data, then a time-height cross section of
winds will be made.
Note: This procedure calls plot_barbs_from_u_v and will take in the
same keyword arguments as that procedure.
Parameters
----------
dir_field : str
The name of the field specifying the wind direction in degrees.
0 degrees is defined to be north and increases clockwise like
what is used in standard meteorological notation.
spd_field : str
The name of the field specifying the wind speed in m/s.
pres_field : str
The name of the field specifying pressure or height. If using
height coordinates, then we recommend setting invert_y_axis
to False.
dsname : str
The name of the datastream to plot. Setting to None will make
ACT attempt to autodetect this.
kwargs : dict
Any additional keyword arguments will be passed into
:func:`act.plotting.TimeSeriesDisplay.plot_barbs_from_u_v`.
Returns
-------
the_ax : matplotlib axis handle
The handle to the axis where the plot was made on.
Examples
--------
.. code-block:: python
sonde_ds = act.io.armfiles.read_netcdf(
act.tests.sample_files.EXAMPLE_TWP_SONDE_WILDCARD)
BarbDisplay = act.plotting.TimeSeriesDisplay(
{'sonde_darwin': sonde_ds}, figsize=(10,5))
BarbDisplay.plot_barbs_from_spd_dir('deg', 'wspd', 'pres',
num_barbs_x=20)
"""
if dsname is None and len(self._obj.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._obj.keys())[0]
# Make temporary field called tempu, tempv
spd = self._obj[dsname][spd_field]
dir = self._obj[dsname][dir_field]
tempu = -np.sin(np.deg2rad(dir)) * spd
tempv = -np.cos(np.deg2rad(dir)) * spd
self._obj[dsname]['temp_u'] = deepcopy(self._obj[dsname][spd_field])
self._obj[dsname]['temp_v'] = deepcopy(self._obj[dsname][spd_field])
self._obj[dsname]['temp_u'].values = tempu
self._obj[dsname]['temp_v'].values = tempv
the_ax = self.plot_barbs_from_u_v('temp_u', 'temp_v', pres_field, dsname, **kwargs)
del self._obj[dsname]['temp_u'], self._obj[dsname]['temp_v']
return the_ax
def plot_barbs_from_u_v(
self,
u_field,
v_field,
pres_field=None,
dsname=None,
subplot_index=(0,),
set_title=None,
day_night_background=False,
invert_y_axis=True,
num_barbs_x=20,
num_barbs_y=20,
use_var_for_y=None,
**kwargs,
):
"""
This function will plot a wind barb timeseries from u and v wind
data. If pres_field is given, a time-height series will be plotted
from 1-D wind data.
Parameters
----------
u_field : str
The name of the field containing the U component of the wind.
v_field : str
The name of the field containing the V component of the wind.
pres_field : str or None
The name of the field containing the pressure or height. Set
to None to not use this.
dsname : str or None
The name of the datastream to plot. Setting to None will make
ACT automatically try to determine this.
subplot_index : 2-tuple
The index of the subplot to make the plot on.
set_title : str or None
The title of the plot.
day_night_background : bool
Set to True to plot a day/night background.
invert_y_axis : bool
Set to True to invert the y axis (i.e. for plotting pressure as
the height coordinate).
num_barbs_x : int
The number of wind barbs to plot in the x axis.
num_barbs_y : int
The number of wind barbs to plot in the y axis.
cmap : matplotlib.colors.LinearSegmentedColormap
A color map to use with wind barbs. If this is set the plt.barbs
routine will be passed the C parameter scaled as sqrt of sum of the
squares and used with the passed in color map. A colorbar will also
be added. Setting the limits of the colorbar can be done with 'clim'.
Setting this changes the wind barbs from black to colors.
use_var_for_y : str
Set this to the name of a data variable in the Dataset to use as the
y-axis variable instead of the default dimension. Useful for instances
where data has an index-based dimension instead of a height-based
dimension. If shapes of arrays do not match it will automatically
revert to the original ydata.
**kwargs : keyword arguments
Additional keyword arguments will be passed into plt.barbs.
Returns
-------
ax : matplotlib axis handle
The axis handle that contains the reference to the
constructed plot.
"""
if dsname is None and len(self._obj.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._obj.keys())[0]
# Get data and dimensions
u = self._obj[dsname][u_field].values
v = self._obj[dsname][v_field].values
dim = list(self._obj[dsname][u_field].dims)
xdata = self._obj[dsname][dim[0]].values
num_x = xdata.shape[-1]
barb_step_x = round(num_x / num_barbs_x)
if barb_step_x == 0:
barb_step_x = 1
if len(dim) > 1 and pres_field is None:
if use_var_for_y is None:
ydata = self._obj[dsname][dim[1]]
else:
ydata = self._obj[dsname][use_var_for_y]
ydata_dim1 = self._obj[dsname][dim[1]]
if np.shape(ydata) != np.shape(ydata_dim1):
ydata = ydata_dim1
if 'units' in ydata.attrs:
units = ydata.attrs['units']
else:
units = ''
ytitle = ''.join(['(', units, ')'])
num_y = ydata.shape[0]
barb_step_y = round(num_y / num_barbs_y)
if barb_step_y == 0:
barb_step_y = 1
xdata, ydata = np.meshgrid(xdata, ydata, indexing='ij')
elif pres_field is not None:
# What we will do here is do a nearest-neighbor interpolation
# for each member of the series. Coordinates are time, pressure
pres = self._obj[dsname][pres_field]
u_interp = NearestNDInterpolator((xdata, pres.values), u, rescale=True)
v_interp = NearestNDInterpolator((xdata, pres.values), v, rescale=True)
barb_step_x = 1
barb_step_y = 1
x_times = pd.date_range(xdata.min(), xdata.max(), periods=num_barbs_x)
if num_barbs_y == 1:
y_levels = pres.mean()
else:
y_levels = np.linspace(np.nanmin(pres), np.nanmax(pres), num_barbs_y)
xdata, ydata = np.meshgrid(x_times, y_levels, indexing='ij')
u = u_interp(xdata, ydata)
v = v_interp(xdata, ydata)
if 'units' in pres.attrs:
units = pres.attrs['units']
else:
units = ''
ytitle = ''.join(['(', units, ')'])
else:
ydata = None
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
if ydata is None:
ydata = np.ones(xdata.shape)
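# 1-D case: if a colormap was passed, color each barb by wind speed
# (sqrt(u^2 + v^2)) and attach a colorbar; otherwise draw plain barbs and
# hide the meaningless y ticks.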
if 'cmap' in kwargs.keys():
map_color = np.sqrt(np.power(u[::barb_step_x], 2) + np.power(v[::barb_step_x], 2))
map_color[np.isnan(map_color)] = 0
ax = self.axes[subplot_index].barbs(
xdata[::barb_step_x],
ydata[::barb_step_x],
u[::barb_step_x],
v[::barb_step_x],
map_color,
**kwargs,
)
plt.colorbar(
ax,
ax=[self.axes[subplot_index]],
label='Wind Speed (' + self._obj[dsname][u_field].attrs['units'] + ')',
)
else:
self.axes[subplot_index].barbs(
xdata[::barb_step_x],
ydata[::barb_step_x],
u[::barb_step_x],
v[::barb_step_x],
**kwargs,
)
self.axes[subplot_index].set_yticks([])
else:
if 'cmap' in kwargs.keys():
map_color = np.sqrt(
np.power(u[::barb_step_x, ::barb_step_y], 2)
+ np.power(v[::barb_step_x, ::barb_step_y], 2)
)
map_color[np.isnan(map_color)] = 0
ax = self.axes[subplot_index].barbs(
xdata[::barb_step_x, ::barb_step_y],
ydata[::barb_step_x, ::barb_step_y],
u[::barb_step_x, ::barb_step_y],
v[::barb_step_x, ::barb_step_y],
map_color,
**kwargs,
)
plt.colorbar(
ax,
ax=[self.axes[subplot_index]],
label='Wind Speed (' + self._obj[dsname][u_field].attrs['units'] + ')',
)
else:
ax = self.axes[subplot_index].barbs(
xdata[::barb_step_x, ::barb_step_y],
ydata[::barb_step_x, ::barb_step_y],
u[::barb_step_x, ::barb_step_y],
v[::barb_step_x, ::barb_step_y],
**kwargs,
)
if day_night_background is True:
self.day_night_background(subplot_index=subplot_index, dsname=dsname)
# Set Title
if set_title is None:
set_title = ' '.join(
[
dsname,
'on',
dt_utils.numpy_to_arm_date(self._obj[dsname].time.values[0]),
]
)
self.axes[subplot_index].set_title(set_title)
# Set YTitle
if 'ytitle' in locals():
self.axes[subplot_index].set_ylabel(ytitle)
# Set X Limit - We want the same time axes for all subplots
time_rng = [xdata.min(), xdata.max()]
self.set_xrng(time_rng, subplot_index)
# Set Y Limit
if hasattr(self, 'yrng'):
# Make sure that the yrng is not just the default
if not np.all(self.yrng[subplot_index] == 0):
self.set_yrng(self.yrng[subplot_index], subplot_index)
else:
if ydata is None:
our_data = xdata
else:
our_data = ydata
if np.isfinite(our_data).any():
if invert_y_axis is False:
yrng = [np.nanmin(our_data), np.nanmax(our_data)]
else:
yrng = [np.nanmax(our_data), np.nanmin(our_data)]
else:
yrng = [0, 1]
self.set_yrng(yrng, subplot_index)
# Set X Format
if len(subplot_index) == 1:
days = self.xrng[subplot_index, 1] - self.xrng[subplot_index, 0]
else:
days = (
self.xrng[subplot_index[0], subplot_index[1], 1]
- self.xrng[subplot_index[0], subplot_index[1], 0]
)
# Put on an xlabel, but only if we are making the bottom-most plot
if subplot_index[0] == self.axes.shape[0] - 1:
self.axes[subplot_index].set_xlabel('Time [UTC]')
myFmt = common.get_date_format(days)
self.axes[subplot_index].xaxis.set_major_formatter(myFmt)
return self.axes[subplot_index]
def plot_time_height_xsection_from_1d_data(
self,
data_field,
pres_field,
dsname=None,
subplot_index=(0,),
set_title=None,
day_night_background=False,
num_time_periods=20,
num_y_levels=20,
invert_y_axis=True,
cbar_label=None,
set_shading='auto',
**kwargs,
):
"""
This will plot a time-height cross section from 1D datasets using
nearest neighbor interpolation on a regular time by height grid.
All that is needed are a data variable and a height variable.
Parameters
----------
data_field : str
The name of the field to plot.
pres_field : str
The name of the height or pressure field to plot.
dsname : str or None
The name of the datastream to plot
subplot_index : 2-tuple
The index of the subplot to create the plot on.
set_title : str or None
The title of the plot.
day_night_background : bool
Set to true to plot the day/night background.
num_time_periods : int
Set to determine how many time periods. Setting to None
will do one time period per day.
num_y_levels : int
The number of levels in the y axis to use.
invert_y_axis : bool
Set to true to invert the y-axis (recommended for
pressure coordinates).
cbar_label : str
Option to overwrite default colorbar label.
set_shading : string
Option to set the matplotlib.pcolormesh shading parameter.
Defaults to 'auto'.
**kwargs : keyword arguments
Additional keyword arguments will be passed
into :func:`plt.pcolormesh`
Returns
-------
ax : matplotlib axis handle
The matplotlib axis handle pointing to the plot.
"""
if dsname is None and len(self._obj.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._obj.keys())[0]
dim = list(self._obj[dsname][data_field].dims)
if len(dim) > 1:
raise ValueError(
'plot_time_height_xsection_from_1d_data only '
'supports 1-D datasets. For datasets with 2 or '
'more dimensions use plot().'
)
# Get data and dimensions
data = self._obj[dsname][data_field].values
xdata = self._obj[dsname][dim[0]].values
# What we will do here is do a nearest-neighbor interpolation for each
# member of the series. Coordinates are time, pressure
pres = self._obj[dsname][pres_field]
u_interp = NearestNDInterpolator((xdata, pres.values), data, rescale=True)
# Resample the nearest-neighbor interpolator onto a regular grid of
# num_time_periods x num_y_levels points so it can be drawn with pcolormesh
x_times = pd.date_range(xdata.min(), xdata.max(), periods=num_time_periods)
y_levels = np.linspace(np.nanmin(pres), np.nanmax(pres), num_y_levels)
tdata, ydata = np.meshgrid(x_times, y_levels, indexing='ij')
data = u_interp(tdata, ydata)
ytitle = ''.join(['(', pres.attrs['units'], ')'])
units = data_field + ' (' + self._obj[dsname][data_field].attrs['units'] + ')'
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
mesh = self.axes[subplot_index].pcolormesh(
x_times, y_levels, np.transpose(data), shading=set_shading, **kwargs
)
if day_night_background is True:
self.day_night_background(subplot_index=subplot_index, dsname=dsname)
# Set Title
if set_title is None:
set_title = ' '.join(
[
dsname,
'on',
dt_utils.numpy_to_arm_date(self._obj[dsname].time.values[0]),
]
)
self.axes[subplot_index].set_title(set_title)
# Set YTitle
if 'ytitle' in locals():
self.axes[subplot_index].set_ylabel(ytitle)
# Set X Limit - We want the same time axes for all subplots
time_rng = [x_times[-1], x_times[0]]
self.set_xrng(time_rng, subplot_index)
# Set Y Limit
if hasattr(self, 'yrng'):
# Make sure that the yrng is not just the default
if not np.all(self.yrng[subplot_index] == 0):
self.set_yrng(self.yrng[subplot_index], subplot_index)
else:
if ydata is None:
our_data = data.values
else:
our_data = ydata
if np.isfinite(our_data).any():
if invert_y_axis is False:
yrng = [np.nanmin(our_data), np.nanmax(our_data)]
else:
yrng = [np.nanmax(our_data), np.nanmin(our_data)]
else:
yrng = [0, 1]
self.set_yrng(yrng, subplot_index)
# Set X Format
if len(subplot_index) == 1:
days = self.xrng[subplot_index, 1] - self.xrng[subplot_index, 0]
else:
days = (
self.xrng[subplot_index[0], subplot_index[1], 1]
- self.xrng[subplot_index[0], subplot_index[1], 0]
)
# Put on an xlabel, but only if we are making the bottom-most plot
if subplot_index[0] == self.axes.shape[0] - 1:
self.axes[subplot_index].set_xlabel('Time [UTC]')
if ydata is not None:
if cbar_label is None:
self.add_colorbar(mesh, title=units, subplot_index=subplot_index)
else:
self.add_colorbar(mesh, title=cbar_label, subplot_index=subplot_index)
myFmt = common.get_date_format(days)
self.axes[subplot_index].xaxis.set_major_formatter(myFmt)
return self.axes[subplot_index]
def time_height_scatter(
self,
data_field=None,
dsname=None,
cmap='rainbow',
alt_label=None,
alt_field='alt',
cb_label=None,
**kwargs,
):
"""
Create a time series plot of altitude and data variable with
color also indicating value with a color bar. The color bar is
positioned to serve both as the indicator of the color intensity
and the second y-axis.
Parameters
----------
data_field : str
Name of data field in the object to plot on second y-axis.
dsname : str or None
The name of the datastream to plot.
cmap : str
Colorbar color map to use.
alt_label : str
Altitude first y-axis label to use. If None, will try to use
long_name and units.
alt_field : str
Label for field in the object to plot on first y-axis.
cb_label : str
Colorbar label to use. If not set will try to use
long_name and units.
**kwargs : keyword arguments
Any other keyword arguments that will be passed
into TimeSeriesDisplay.plot module when the figure
is made.
"""
if dsname is None and len(self._obj.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._obj.keys())[0]
# Get data and dimensions
data = self._obj[dsname][data_field]
altitude = self._obj[dsname][alt_field]
dim = list(self._obj[dsname][data_field].dims)
xdata = self._obj[dsname][dim[0]]
if alt_label is None:
try:
alt_label = altitude.attrs['long_name'] + ''.join(
[' (', altitude.attrs['units'], ')']
)
except KeyError:
alt_label = alt_field
if cb_label is None:
try:
cb_label = data.attrs['long_name'] + ''.join([' (', data.attrs['units'], ')'])
except KeyError:
cb_label = data_field
colorbar_map = plt.cm.get_cmap(cmap)
self.fig.subplots_adjust(left=0.1, right=0.86, bottom=0.16, top=0.91)
ax1 = self.plot(alt_field, color='black', **kwargs)
ax1.set_ylabel(alt_label)
ax2 = ax1.twinx()
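# Color the scatter by the data values themselves; the colorbar added on the
# right doubles as the second y-axis, so the twin-axis tick labels are hidden.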
sc = ax2.scatter(xdata.values, data.values, c=data.values, marker='.', cmap=colorbar_map)
cbaxes = self.fig.add_axes(
[
self.fig.subplotpars.right + 0.02,
self.fig.subplotpars.bottom,
0.02,
self.fig.subplotpars.top - self.fig.subplotpars.bottom,
]
)
cbar = plt.colorbar(sc, cax=cbaxes)
ax2.set_ylim(cbar.mappable.get_clim())
cbar.ax.set_ylabel(cb_label)
ax2.set_yticklabels([])
return self.axes[0]
def qc_flag_block_plot(
self,
data_field=None,
dsname=None,
subplot_index=(0,),
time_rng=None,
assessment_color=None,
edgecolor='face',
set_shading='auto',
**kwargs,
):
"""
Create a time series plot of embedded quality control values
using broken barh plotting.
Parameters
----------
data_field : str
Name of data field in the object to plot corresponding quality
control.
dsname : None or str
If there is more than one datastream in the display object the
name of the datastream needs to be specified. If set to None and
there is only one datastream ACT will use the sole datastream
in the object.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to place the plot on.
time_rng : tuple or list
List or tuple with (min, max) values to set the x-axis range limits.
assessment_color : dict
Dictionary lookup to override default assessment to color. Make sure
the assessment words are spelled with matching case.
edgecolor : str or list
Color name, list of color names or 'face' as defined in matplotlib.axes.Axes.broken_barh
set_shading : string
Option to set the matplotlib.pcolormesh shading parameter.
Defaults to 'auto'.
**kwargs : keyword arguments
The keyword arguments for :func:`plt.broken_barh`.
"""
# Color to plot associated with assessment.
color_lookup = {
'Bad': 'red',
'Incorrect': 'red',
'Indeterminate': 'orange',
'Suspect': 'orange',
'Missing': 'darkgray',
'Not Failing': 'green',
'Acceptable': 'green',
}
if assessment_color is not None:
for asses, color in assessment_color.items():
color_lookup[asses] = color
if asses == 'Incorrect':
color_lookup['Bad'] = color
if asses == 'Suspect':
color_lookup['Indeterminate'] = color
# Set up list of test names to use for missing values
missing_val_long_names = [
'Value equal to missing_value*',
'Value set to missing_value*',
'Value is equal to missing_value*',
'Value is set to missing_value*',
]
if dsname is None and len(self._obj.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._obj.keys())[0]
# Set up or get current plot figure
if self.fig is None:
self.fig = plt.figure()
# Set up or get current axes
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
ax = self.axes[subplot_index]
# Set X Limit - We want the same time axes for all subplots
data = self._obj[dsname][data_field]
dim = list(self._obj[dsname][data_field].dims)
xdata = self._obj[dsname][dim[0]]
# Get data and attributes
qc_data_field = self._obj[dsname].qcfilter.check_for_ancillary_qc(
data_field, add_if_missing=False, cleanup=False
)
if qc_data_field is None:
raise ValueError(f'No quality control ancillary variable in Dataset for {data_field}')
flag_masks = self._obj[dsname][qc_data_field].attrs['flag_masks']
flag_meanings = self._obj[dsname][qc_data_field].attrs['flag_meanings']
flag_assessments = self._obj[dsname][qc_data_field].attrs['flag_assessments']
# Get time ranges for green blocks
time_delta = determine_time_delta(xdata.values)
barh_list_green = reduce_time_ranges(xdata.values, time_delta=time_delta, broken_barh=True)
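# barh_list_green holds (start, duration) tuples covering times where data
# exist; failing tests are drawn over this green base layer below.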
# Set background to gray indicating not available data
ax.set_facecolor('dimgray')
# Check if plotting 2D data vs 1D data. 2D data will be summarized by
# assessment category instead of showing each test.
data_shape = self._obj[dsname][qc_data_field].shape
if len(data_shape) > 1:
cur_assessments = list(set(flag_assessments))
cur_assessments.sort()
cur_assessments.reverse()
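# Build an integer image of the QC state: 0 = not failing, 1..N = assessment
# category, -1 = missing (masked out). It is drawn below with a ListedColormap
# built from plot_colors.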
qc_data = np.full(data_shape, -1, dtype=np.int16)
plot_colors = []
tick_names = []
index = self._obj[dsname][qc_data_field].values == 0
if index.any():
qc_data[index] = 0
plot_colors.append(color_lookup['Not Failing'])
tick_names.append('Not Failing')
for ii, assess in enumerate(cur_assessments):
if assess not in color_lookup:
color_lookup[assess] = list(mplcolors.CSS4_COLORS.keys())[ii]
ii += 1
assess_data = self._obj[dsname].qcfilter.get_masked_data(
data_field, rm_assessments=assess
)
if assess_data.mask.any():
qc_data[assess_data.mask] = ii
plot_colors.append(color_lookup[assess])
tick_names.append(assess)
# Overwrite missing data. Not sure if we want to do this because VAPs set
# the value to missing but the test is set to Bad. This tries to overcome that
# by looking for correct test description that would only indicate the values
# are missing not that they are set to missing by a test... most likely.
missing_test_nums = []
for ii, flag_meaning in enumerate(flag_meanings):
# Check if the bit set is indicating missing data.
for val in missing_val_long_names:
if re_search(val, flag_meaning):
test_num = parse_bit(flag_masks[ii])[0]
missing_test_nums.append(test_num)
assess_data = self._obj[dsname].qcfilter.get_masked_data(
data_field, rm_tests=missing_test_nums
)
if assess_data.mask.any():
qc_data[assess_data.mask] = -1
plot_colors.append(color_lookup['Missing'])
tick_names.append('Missing')
# Create a masked array to allow not plotting where values are missing
qc_data = np.ma.masked_equal(qc_data, -1)
dims = self._obj[dsname][qc_data_field].dims
xvalues = self._obj[dsname][dims[0]].values
yvalues = self._obj[dsname][dims[1]].values
cMap = mplcolors.ListedColormap(plot_colors)
mesh = ax.pcolormesh(
xvalues,
yvalues,
np.transpose(qc_data),
cmap=cMap,
vmin=0,
shading=set_shading,
)
divider = make_axes_locatable(ax)
# Determine correct placement of words on colorbar
tick_nums = (
np.arange(0, len(tick_names) * 2 + 1) / (len(tick_names) * 2) * np.nanmax(qc_data)
)[1::2]
cax = divider.append_axes('bottom', size='5%', pad=0.3)
cbar = self.fig.colorbar(
mesh,
cax=cax,
orientation='horizontal',
spacing='uniform',
ticks=tick_nums,
shrink=0.5,
)
cbar.ax.set_xticklabels(tick_names)
# Set YTitle
dim_name = list(set(self._obj[dsname][qc_data_field].dims) - {'time'})
try:
ytitle = f"{dim_name[0]} ({self._obj[dsname][dim_name[0]].attrs['units']})"
ax.set_ylabel(ytitle)
except KeyError:
pass
# Add which tests were set as text to the plot
unique_values = []
for ii in np.unique(self._obj[dsname][qc_data_field].values):
unique_values.extend(parse_bit(ii))
if len(unique_values) > 0:
unique_values = list(set(unique_values))
unique_values.sort()
unique_values = [str(ii) for ii in unique_values]
self.fig.text(
0.5,
-0.35,
f"QC Tests Tripped: {', '.join(unique_values)}",
transform=ax.transAxes,
horizontalalignment='center',
verticalalignment='center',
fontweight='bold',
)
else:
test_nums = []
for ii, assess in enumerate(flag_assessments):
if assess not in color_lookup:
color_lookup[assess] = list(mplcolors.CSS4_COLORS.keys())[ii]
# Plot green data first.
ax.broken_barh(
barh_list_green,
(ii, ii + 1),
facecolors=color_lookup['Not Failing'],
edgecolor=edgecolor,
**kwargs,
)
# Get test number from flag_mask bitpacked number
test_nums.append(parse_bit(flag_masks[ii]))
# Get masked array data to use mask for finding if/where test is set
data = self._obj[dsname].qcfilter.get_masked_data(
data_field, rm_tests=test_nums[-1]
)
if np.any(data.mask):
# Get time ranges from time and masked data
barh_list = reduce_time_ranges(
xdata.values[data.mask], time_delta=time_delta, broken_barh=True
)
# Check if the bit set is indicating missing data. If so change
# to different plotting color than what is in flag_assessments.
for val in missing_val_long_names:
if re_search(val, flag_meanings[ii]):
assess = 'Missing'
break
# Lay down blocks of tripped tests using correct color
ax.broken_barh(
barh_list,
(ii, ii + 1),
facecolors=color_lookup[assess],
edgecolor=edgecolor,
**kwargs,
)
# Add test description to plot.
ax.text(xdata.values[0], ii + 0.5, ' ' + flag_meanings[ii], va='center')
# Change y ticks to test number
plt.yticks(
[ii + 0.5 for ii in range(0, len(test_nums))],
labels=['Test ' + str(ii[0]) for ii in test_nums],
)
# Set ylimit to number of tests plotted
ax.set_ylim(0, len(flag_assessments))
# Set X Limit - We want the same time axes for all subplots
if not hasattr(self, 'time_rng'):
if time_rng is not None:
self.time_rng = list(time_rng)
else:
self.time_rng = [xdata.min().values, xdata.max().values]
self.set_xrng(self.time_rng, subplot_index)
# Get X format - We want the same time axes for all subplots
if hasattr(self, 'time_fmt'):
ax.xaxis.set_major_formatter(self.time_fmt)
else:
# Set X Format
if len(subplot_index) == 1:
days = self.xrng[subplot_index, 1] - self.xrng[subplot_index, 0]
else:
days = (
self.xrng[subplot_index[0], subplot_index[1], 1]
- self.xrng[subplot_index[0], subplot_index[1], 0]
)
myFmt = common.get_date_format(days)
ax.xaxis.set_major_formatter(myFmt)
self.time_fmt = myFmt
return self.axes[subplot_index]
def fill_between(
self,
field,
dsname=None,
subplot_index=(0,),
set_title=None,
secondary_y=False,
**kwargs,
):
"""
Makes a fill_between plot, based on matplotlib
Parameters
----------
field : str
The name of the field to plot.
dsname : None or str
If there is more than one datastream in the display object the
name of the datastream needs to be specified. If set to None and
there is only one datastream ACT will use the sole datastream
in the object.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to plot on.
set_title : str
The title for the plot.
secondary_y : boolean
Option to indicate if the data should be plotted on second y-axis.
**kwargs : keyword arguments
The keyword arguments passed into :func:`plt.fill_between`.
Returns
-------
ax : matplotlib axis handle
The matplotlib axis handle of the plot.
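Examples
--------
A minimal sketch; the file list and field name below are illustrative:
.. code-block:: python
    ds = act.io.armfiles.read_netcdf(my_files)
    display = act.plotting.TimeSeriesDisplay(ds)
    display.plot('precip_accum')
    display.fill_between('precip_accum', alpha=0.3)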
"""
if dsname is None and len(self._obj.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._obj.keys())[0]
# Get data and dimensions
data = self._obj[dsname][field]
dim = list(self._obj[dsname][field].dims)
xdata = self._obj[dsname][dim[0]]
if 'units' in data.attrs:
ytitle = ''.join(['(', data.attrs['units'], ')'])
else:
ytitle = field
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
# Set ax to appropriate axis
if secondary_y is False:
ax = self.axes[subplot_index]
else:
ax = self.axes[subplot_index].twinx()
ax.fill_between(xdata.values, data, **kwargs)
# Set X Format
if len(subplot_index) == 1:
days = self.xrng[subplot_index, 1] - self.xrng[subplot_index, 0]
else:
days = (
self.xrng[subplot_index[0], subplot_index[1], 1]
- self.xrng[subplot_index[0], subplot_index[1], 0]
)
myFmt = common.get_date_format(days)
ax.xaxis.set_major_formatter(myFmt)
# Set X format - We want the same time axes for all subplots
if not hasattr(self, 'time_fmt'):
self.time_fmt = myFmt
# Put on an xlabel, but only if we are making the bottom-most plot
if subplot_index[0] == self.axes.shape[0] - 1:
self.axes[subplot_index].set_xlabel('Time [UTC]')
# Set YTitle
ax.set_ylabel(ytitle)
# Set Title
if set_title is None:
set_title = ' '.join(
[
dsname,
field,
'on',
dt_utils.numpy_to_arm_date(self._obj[dsname].time.values[0]),
]
)
if secondary_y is False:
ax.set_title(set_title)
return self.axes[subplot_index]
<|code_end|>
|
act/plotting/timeseriesdisplay.py
<|code_start|>"""
Stores the class for TimeSeriesDisplay.
"""
import datetime as dt
import warnings
from copy import deepcopy
from re import search, search as re_search
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from matplotlib import colors as mplcolors
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scipy.interpolate import NearestNDInterpolator
from ..qc.qcfilter import parse_bit
from ..utils import data_utils, datetime_utils as dt_utils
from ..utils.datetime_utils import determine_time_delta, reduce_time_ranges
from ..utils.geo_utils import get_sunrise_sunset_noon
from . import common
from .plot import Display
class TimeSeriesDisplay(Display):
"""
This subclass contains routines that are specific to plotting
time series plots from data. It is inherited from Display and therefore
contains all of Display's attributes and methods.
Examples
--------
To create a TimeSeriesDisplay with 3 rows, simply do:
.. code-block:: python
ds = act.read_netcdf(the_file)
disp = act.plotting.TimeSeriesDisplay(ds, subplot_shape=(3,), figsize=(15, 5))
The TimeSeriesDisplay constructor takes in the same keyword arguments as
plt.subplots. For more information on the plt.subplots keyword arguments,
see the `matplotlib documentation
<https://matplotlib.org/api/_as_gen/matplotlib.pyplot.subplots.html>`_.
If no subplot_shape is provided, then no figure or axis will be created
until add_subplots or plots is called.
"""
def __init__(self, obj, subplot_shape=(1,), ds_name=None, **kwargs):
super().__init__(obj, subplot_shape, ds_name, **kwargs)
def day_night_background(self, dsname=None, subplot_index=(0,)):
"""
Colorcodes the background according to sunrise/sunset.
Parameters
----------
dsname : None or str
If there is more than one datastream in the display object the
name of the datastream needs to be specified. If set to None and
there is only one datastream then ACT will use the sole datastream
in the object.
subplot_index : 1 or 2D tuple, list, or array
The index to the subplot to place the day and night background in.
"""
if dsname is None and len(self._obj.keys()) > 1:
raise ValueError(
'You must choose a datastream to derive the '
+ 'information needed for the day and night '
+ 'background when 2 or more datasets are in '
+ 'the display object.'
)
elif dsname is None:
dsname = list(self._obj.keys())[0]
# Get File Dates
try:
file_dates = self._obj[dsname].attrs['_file_dates']
except KeyError:
file_dates = []
if len(file_dates) == 0:
sdate = dt_utils.numpy_to_arm_date(self._obj[dsname].time.values[0])
edate = dt_utils.numpy_to_arm_date(self._obj[dsname].time.values[-1])
file_dates = [sdate, edate]
all_dates = dt_utils.dates_between(file_dates[0], file_dates[-1])
if self.axes is None:
raise RuntimeError('day_night_background requires the plot to ' 'be displayed.')
ax = self.axes[subplot_index]
# Find variable names for latitude and longitude
variables = list(self._obj[dsname].data_vars)
lat_name = [var for var in ['lat', 'latitude'] if var in variables]
lon_name = [var for var in ['lon', 'longitude'] if var in variables]
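# Prefer variables literally named lat/lon (or latitude/longitude); fall back
# to the standard_name attribute below, and return silently if neither exists.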
if len(lat_name) == 0:
lat_name = None
else:
lat_name = lat_name[0]
if len(lon_name) == 0:
lon_name = None
else:
lon_name = lon_name[0]
# Variable name does not match, look for standard_name declaration
if lat_name is None or lon_name is None:
for var in variables:
try:
if self._obj[dsname][var].attrs['standard_name'] == 'latitude':
lat_name = var
except KeyError:
pass
try:
if self._obj[dsname][var].attrs['standard_name'] == 'longitude':
lon_name = var
except KeyError:
pass
if lat_name is not None and lon_name is not None:
break
if lat_name is None or lon_name is None:
return
try:
if self._obj[dsname][lat_name].data.size > 1:
# Look for non-NaN values to use for location. If none are found use the first value.
lat = self._obj[dsname][lat_name].values
index = np.where(np.isfinite(lat))[0]
if index.size == 0:
index = [0]
lat = float(lat[index[0]])
# Look for non-NaN values to use for location. If none are found use the first value.
lon = self._obj[dsname][lon_name].values
index = np.where(np.isfinite(lon))[0]
if index.size == 0:
index = [0]
lon = float(lon[index[0]])
else:
lat = float(self._obj[dsname][lat_name].values)
lon = float(self._obj[dsname][lon_name].values)
except AttributeError:
return
if not np.isfinite(lat):
warnings.warn(f"Latitude value in dataset of '{lat}' is not finite. ", RuntimeWarning)
return
if not np.isfinite(lon):
warnings.warn(f"Longitude value in dataset of '{lon}' is not finite. ", RuntimeWarning)
return
lat_range = [-90, 90]
if not (lat_range[0] <= lat <= lat_range[1]):
warnings.warn(
f"Latitude value in dataset of '{lat}' not within acceptable "
f'range of {lat_range[0]} <= latitude <= {lat_range[1]}. ',
RuntimeWarning,
)
return
lon_range = [-180, 180]
if not (lon_range[0] <= lon <= lon_range[1]):
warnings.warn(
f"Longitude value in dataset of '{lon}' not within acceptable "
f'range of {lon_range[0]} <= longitude <= {lon_range[1]}. ',
RuntimeWarning,
)
return
# initialize the plot to a gray background for total darkness
rect = ax.patch
rect.set_facecolor('0.85')
# Get date ranges to plot
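# Pad each file date with the day before and after so day/night shading
# covers spans that cross midnight.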
plot_dates = []
for f in all_dates:
for ii in [-1, 0, 1]:
plot_dates.append(f + dt.timedelta(days=ii))
# Get sunrise, sunset and noon times
sunrise, sunset, noon = get_sunrise_sunset_noon(lat, lon, plot_dates)
# Plot daylight
for ii in range(0, len(sunrise)):
ax.axvspan(sunrise[ii], sunset[ii], facecolor='#FFFFCC', zorder=0)
# Plot noon line
for ii in noon:
ax.axvline(x=ii, linestyle='--', color='y', zorder=1)
def set_xrng(self, xrng, subplot_index=(0,)):
"""
Sets the x range of the plot.
Parameters
----------
xrng : 2 number array
The x limits of the plot.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
"""
if self.axes is None:
raise RuntimeError('set_xrng requires the plot to be displayed.')
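# Lazily allocate the per-subplot x-range store as datetime64[D]; the stored
# limits are used later to pick an appropriate date format for the axis.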
if not hasattr(self, 'xrng') and len(self.axes.shape) == 2:
self.xrng = np.zeros((self.axes.shape[0], self.axes.shape[1], 2), dtype='datetime64[D]')
elif not hasattr(self, 'xrng') and len(self.axes.shape) == 1:
self.xrng = np.zeros((self.axes.shape[0], 2), dtype='datetime64[D]')
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=UserWarning)
self.axes[subplot_index].set_xlim(xrng)
self.xrng[subplot_index, :] = np.array(xrng, dtype='datetime64[D]')
def set_yrng(self, yrng, subplot_index=(0,)):
"""
Sets the y range of the plot.
Parameters
----------
yrng : 2 number array
The y limits of the plot.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the y range of.
"""
if self.axes is None:
raise RuntimeError('set_yrng requires the plot to be displayed.')
if not hasattr(self, 'yrng') and len(self.axes.shape) == 2:
self.yrng = np.zeros((self.axes.shape[0], self.axes.shape[1], 2))
elif not hasattr(self, 'yrng') and len(self.axes.shape) == 1:
self.yrng = np.zeros((self.axes.shape[0], 2))
if yrng[0] == yrng[1]:
yrng[1] = yrng[1] + 1
self.axes[subplot_index].set_ylim(yrng)
self.yrng[subplot_index, :] = yrng
def plot(
self,
field,
dsname=None,
subplot_index=(0,),
cmap=None,
set_title=None,
add_nan=False,
day_night_background=False,
invert_y_axis=False,
abs_limits=(None, None),
time_rng=None,
y_rng=None,
use_var_for_y=None,
set_shading='auto',
assessment_overplot=False,
overplot_marker='.',
overplot_behind=False,
overplot_markersize=6,
assessment_overplot_category={
'Incorrect': ['Bad', 'Incorrect'],
'Suspect': ['Indeterminate', 'Suspect'],
},
assessment_overplot_category_color={'Incorrect': 'red', 'Suspect': 'orange'},
force_line_plot=False,
labels=False,
cbar_label=None,
secondary_y=False,
**kwargs,
):
"""
Makes a timeseries plot. If subplots have not been added yet, an axis
will be created assuming that there is only going to be one plot.
Parameters
----------
field : str
The name of the field to plot.
dsname : None or str
If there is more than one datastream in the display object the
name of the datastream needs to be specified. If set to None and
there is only one datastream ACT will use the sole datastream
in the object.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to place the plot on.
cmap : matplotlib colormap
The colormap to use.
set_title : str
The title for the plot.
add_nan : bool
Set to True to fill in data gaps with NaNs.
day_night_background : bool
Set to True to fill in a color coded background
according to the time of day.
abs_limits : tuple or list
Sets the bounds on plot limits even if data values exceed
those limits. Set to (ymin,ymax). Use None if only setting
minimum or maximum limit, i.e. (22., None).
time_rng : tuple or list
List or tuple with (min, max) values to set the x-axis range
limits.
y_rng : tuple or list
List or tuple with (min, max) values to set the y-axis range
use_var_for_y : str
Set this to the name of a data variable in the Dataset to use as
the y-axis variable instead of the default dimension. Useful for
instances where data has an index-based dimension instead of a
height-based dimension. If shapes of arrays do not match it will
automatically revert back to the original ydata.
set_shading : string
Option to set the matplotlib.pcolormesh shading parameter.
Defaults to 'auto'.
assessment_overplot : boolean
Option to overplot quality control colored symbols over plotted
data using flag_assessment categories.
overplot_marker : str
Marker to use for overplot symbol.
overplot_behind : bool
Place the overplot marker behind the data point.
overplot_markersize : float or int
Size of overplot marker. If overplot_behind or force_line_plot
are set the marker size will be double overplot_markersize so
the color is visible.
assessment_overplot_category : dict
Lookup to categorize assessments into groups. This allows using
multiple terms for the same quality control level of failure.
Also allows adding more to the defaults.
assessment_overplot_category_color : dict
Lookup to match overplot category color to assessment grouping.
force_line_plot : boolean
Option to plot 2D data as 1D line plots.
labels : boolean or list
Option to overwrite the legend labels. Must have same dimensions as
number of lines plotted.
cbar_label : str
Option to overwrite default colorbar label.
secondary_y : boolean
Option to plot on secondary y axis.
**kwargs : keyword arguments
The keyword arguments for :func:`plt.plot` (1D timeseries) or
:func:`plt.pcolormesh` (2D timeseries).
Returns
-------
ax : matplotlib axis handle
The matplotlib axis handle of the plot.
"""
if dsname is None and len(self._obj.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._obj.keys())[0]
# Get data and dimensions
data = self._obj[dsname][field]
dim = list(self._obj[dsname][field].dims)
xdata = self._obj[dsname][dim[0]]
if 'units' in data.attrs:
ytitle = ''.join(['(', data.attrs['units'], ')'])
else:
ytitle = field
if cbar_label is None:
cbar_default = ytitle
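# 2-D data: use the second dimension (or the variable named by use_var_for_y)
# as the y axis, falling back to the dimension values if the shapes differ.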
if len(dim) > 1:
if use_var_for_y is None:
ydata = self._obj[dsname][dim[1]]
else:
ydata = self._obj[dsname][use_var_for_y]
ydata_dim1 = self._obj[dsname][dim[1]]
if np.shape(ydata) != np.shape(ydata_dim1):
ydata = ydata_dim1
units = ytitle
if 'units' in ydata.attrs.keys():
units = ydata.attrs['units']
ytitle = ''.join(['(', units, ')'])
else:
units = ''
ytitle = dim[1]
# Create labels if 2d as 1d
if force_line_plot is True:
if labels is True:
labels = [' '.join([str(d), units]) for d in ydata.values]
ytitle = f"({data.attrs['units']})"
ydata = None
else:
ydata = None
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
# Set up secondary y axis if requested
if secondary_y is False:
ax = self.axes[subplot_index]
else:
ax = self.axes[subplot_index].twinx()
if ydata is None:
if day_night_background is True:
self.day_night_background(subplot_index=subplot_index, dsname=dsname)
# If limiting data being plotted use masked arrays
# Need to do it this way because of autoscale() method
if abs_limits[0] is not None and abs_limits[1] is not None:
data = np.ma.masked_outside(data, abs_limits[0], abs_limits[1])
elif abs_limits[0] is not None and abs_limits[1] is None:
data = np.ma.masked_less_equal(data, abs_limits[0])
elif abs_limits[0] is None and abs_limits[1] is not None:
data = np.ma.masked_greater_equal(data, abs_limits[1])
# Plot the data
if 'marker' not in kwargs.keys():
kwargs['marker'] = '.'
lines = ax.plot(xdata, data, **kwargs)
# Check if we need to call legend method after plotting. This is only
# called when no assessment overplot is called.
add_legend = False
if 'label' in kwargs.keys():
add_legend = True
# Overplot failing data if requested
if assessment_overplot:
# If we are doing a forced line plot from 2D data we need to manage the
# legend labels. Make arrays to hold the labels of failing QC points because
# they are not otherwise set when the labels keyword is not used.
if not isinstance(labels, list) and add_legend is False:
labels = []
lines = []
# For forced line plot need to plot QC behind point instead of
# on top of point.
zorder = None
if force_line_plot or overplot_behind:
zorder = 0
overplot_markersize *= 2.0
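# For each assessment group (e.g. 'Incorrect', 'Suspect') pull only the
# samples whose QC tests carry that assessment and replot them in the group's
# color, either on top of or behind the data points.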
for assessment, categories in assessment_overplot_category.items():
flag_data = self._obj[dsname].qcfilter.get_masked_data(
field, rm_assessments=categories, return_inverse=True
)
if np.invert(flag_data.mask).any() and np.isfinite(flag_data).any():
try:
flag_data.mask = np.logical_or(data.mask, flag_data.mask)
except AttributeError:
pass
qc_ax = ax.plot(
xdata,
flag_data,
marker=overplot_marker,
linestyle='',
markersize=overplot_markersize,
color=assessment_overplot_category_color[assessment],
label=assessment,
zorder=zorder,
)
# If labels keyword is set need to add labels for calling legend
if isinstance(labels, list):
# If plotting forced_line_plot need to subset the Line2D object
# so we don't have more than one added to legend.
if len(qc_ax) > 1:
lines.extend(qc_ax[:1])
else:
lines.extend(qc_ax)
labels.append(assessment)
add_legend = True
# Add legend if labels are available
if isinstance(labels, list):
ax.legend(lines, labels)
elif add_legend:
ax.legend()
else:
# Add in nans to ensure the data are not streaking
if add_nan is True:
xdata, data = data_utils.add_in_nan(xdata, data)
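# add_in_nan inserts NaN-filled values into larger-than-expected time gaps so
# pcolormesh does not smear data across them.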
# Sets shading parameter to auto. Matplotlib will check dimensions.
# If X, Y and C have the same dimensions shading is set to nearest.
# If X and Y dimensions are 1 greater than C shading is set to flat.
if 'edgecolors' not in kwargs.keys():
kwargs['edgecolors'] = 'face'
mesh = ax.pcolormesh(
np.asarray(xdata),
ydata,
data.transpose(),
shading=set_shading,
cmap=cmap,
**kwargs,
)
# Set Title
if set_title is None:
if isinstance(self._obj[dsname].time.values[0], np.datetime64):
set_title = ' '.join(
[
dsname,
field,
'on',
dt_utils.numpy_to_arm_date(self._obj[dsname].time.values[0]),
]
)
else:
date_result = search(
r'\d{4}-\d{1,2}-\d{1,2}', self._obj[dsname].time.attrs['units']
)
if date_result is not None:
set_title = ' '.join([dsname, field, 'on', date_result.group(0)])
else:
set_title = ' '.join([dsname, field])
if secondary_y is False:
ax.set_title(set_title)
# Set YTitle
ax.set_ylabel(ytitle)
# Set X Limit - We want the same time axes for all subplots
if not hasattr(self, 'time_rng'):
if time_rng is not None:
self.time_rng = list(time_rng)
else:
self.time_rng = [xdata.min().values, xdata.max().values]
self.set_xrng(self.time_rng, subplot_index)
# Set Y Limit
if y_rng is not None:
self.set_yrng(y_rng)
if hasattr(self, 'yrng'):
# Make sure that the yrng is not just the default
if ydata is None:
if abs_limits[0] is not None or abs_limits[1] is not None:
our_data = data
else:
our_data = data.values
else:
our_data = ydata
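# Compute y limits from finite values only; fall back to [0, 1] when
# everything is NaN.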
finite = np.isfinite(our_data)
if finite.any():
our_data = our_data[finite]
if invert_y_axis is False:
yrng = [np.min(our_data), np.max(our_data)]
else:
yrng = [np.max(our_data), np.min(our_data)]
else:
yrng = [0, 1]
# Check if current range is outside of new range an only set
# values that work for all data plotted.
if isinstance(yrng[0], np.datetime64):
yrng = mdates.datestr2num([str(yrng[0]), str(yrng[1])])
current_yrng = ax.get_ylim()
if yrng[0] > current_yrng[0]:
yrng[0] = current_yrng[0]
if yrng[1] < current_yrng[1]:
yrng[1] = current_yrng[1]
# Set y range the normal way if not secondary y
# If secondary, just use set_ylim
if secondary_y is False:
self.set_yrng(yrng, subplot_index)
else:
ax.set_ylim(yrng)
# Set X Format
if len(subplot_index) == 1:
days = self.xrng[subplot_index, 1] - self.xrng[subplot_index, 0]
else:
days = (
self.xrng[subplot_index[0], subplot_index[1], 1]
- self.xrng[subplot_index[0], subplot_index[1], 0]
)
myFmt = common.get_date_format(days)
ax.xaxis.set_major_formatter(myFmt)
# Set X format - We want the same time axes for all subplots
if not hasattr(self, 'time_fmt'):
self.time_fmt = myFmt
# Put on an xlabel, but only if we are making the bottom-most plot
if subplot_index[0] == self.axes.shape[0] - 1:
ax.set_xlabel('Time [UTC]')
if ydata is not None:
if cbar_label is None:
self.add_colorbar(mesh, title=cbar_default, subplot_index=subplot_index)
else:
self.add_colorbar(
mesh,
title=''.join(['(', cbar_label, ')']),
subplot_index=subplot_index,
)
return ax
def plot_barbs_from_spd_dir(
self, speed_field, direction_field, pres_field=None, dsname=None, **kwargs
):
"""
This procedure will make a wind barb plot timeseries.
If a pressure field is given and the wind fields are 1D, which, for
example, would occur if one wants to plot a timeseries of
rawinsonde data, then a time-height cross section of
winds will be made.
Note: This procedure calls plot_barbs_from_u_v and will take in the
same keyword arguments as that procedure.
Parameters
----------
speed_field : str
The name of the field specifying the wind speed in m/s.
direction_field : str
The name of the field specifying the wind direction in degrees.
0 degrees is defined to be north and increases clockwise like
what is used in standard meteorological notation.
pres_field : str
The name of the field specifying pressure or height. If using
height coordinates, then we recommend setting invert_y_axis
to False.
dsname : str
The name of the datastream to plot. Setting to None will make
ACT attempt to autodetect this.
kwargs : dict
Any additional keyword arguments will be passed into
:func:`act.plotting.TimeSeriesDisplay.plot_barbs_from_u_v`.
Returns
-------
the_ax : matplotlib axis handle
The handle to the axis where the plot was made on.
Examples
--------
.. code-block:: python
sonde_ds = act.io.armfiles.read_netcdf(
act.tests.sample_files.EXAMPLE_TWP_SONDE_WILDCARD)
BarbDisplay = act.plotting.TimeSeriesDisplay(
{'sonde_darwin': sonde_ds}, figsize=(10,5))
BarbDisplay.plot_barbs_from_spd_dir('wspd', 'deg', 'pres',
num_barbs_x=20)
"""
if dsname is None and len(self._obj.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._obj.keys())[0]
# Make temporary field called tempu, tempv
spd = self._obj[dsname][speed_field]
dir = self._obj[dsname][direction_field]
tempu = -np.sin(np.deg2rad(dir)) * spd
tempv = -np.cos(np.deg2rad(dir)) * spd
self._obj[dsname]['temp_u'] = deepcopy(self._obj[dsname][speed_field])
self._obj[dsname]['temp_v'] = deepcopy(self._obj[dsname][speed_field])
self._obj[dsname]['temp_u'].values = tempu
self._obj[dsname]['temp_v'].values = tempv
the_ax = self.plot_barbs_from_u_v('temp_u', 'temp_v', pres_field, dsname, **kwargs)
del self._obj[dsname]['temp_u'], self._obj[dsname]['temp_v']
return the_ax
def plot_barbs_from_u_v(
self,
u_field,
v_field,
pres_field=None,
dsname=None,
subplot_index=(0,),
set_title=None,
day_night_background=False,
invert_y_axis=True,
num_barbs_x=20,
num_barbs_y=20,
use_var_for_y=None,
**kwargs,
):
"""
This function will plot a wind barb timeseries from u and v wind
data. If pres_field is given, a time-height series will be plotted
from 1-D wind data.
Parameters
----------
u_field : str
The name of the field containing the U component of the wind.
v_field : str
The name of the field containing the V component of the wind.
pres_field : str or None
The name of the field containing the pressure or height. Set
to None to not use this.
dsname : str or None
The name of the datastream to plot. Setting to None will make
ACT automatically try to determine this.
subplot_index : 2-tuple
The index of the subplot to make the plot on.
set_title : str or None
The title of the plot.
day_night_background : bool
Set to True to plot a day/night background.
invert_y_axis : bool
Set to True to invert the y axis (i.e. for plotting pressure as
the height coordinate).
num_barbs_x : int
The number of wind barbs to plot in the x axis.
num_barbs_y : int
The number of wind barbs to plot in the y axis.
cmap : matplotlib.colors.LinearSegmentedColormap
A color map to use with wind barbs. If this is set the plt.barbs
routine will be passed the C parameter scaled as sqrt of sum of the
squares and used with the passed in color map. A colorbar will also
be added. Setting the limits of the colorbar can be done with 'clim'.
Setting this changes the wind barbs from black to colors.
use_var_for_y : str
Set this to the name of a data variable in the Dataset to use as the
y-axis variable instead of the default dimension. Useful for instances
where data has an index-based dimension instead of a height-based
dimension. If shapes of arrays do not match it will automatically
revert back to the original ydata.
**kwargs : keyword arguments
Additional keyword arguments will be passed into plt.barbs.
Returns
-------
ax : matplotlib axis handle
The axis handle that contains the reference to the
constructed plot.
"""
if dsname is None and len(self._obj.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._obj.keys())[0]
# Get data and dimensions
u = self._obj[dsname][u_field].values
v = self._obj[dsname][v_field].values
dim = list(self._obj[dsname][u_field].dims)
xdata = self._obj[dsname][dim[0]].values
num_x = xdata.shape[-1]
barb_step_x = round(num_x / num_barbs_x)
if barb_step_x == 0:
barb_step_x = 1
if len(dim) > 1 and pres_field is None:
if use_var_for_y is None:
ydata = self._obj[dsname][dim[1]]
else:
ydata = self._obj[dsname][use_var_for_y]
ydata_dim1 = self._obj[dsname][dim[1]]
if np.shape(ydata) != np.shape(ydata_dim1):
ydata = ydata_dim1
if 'units' in ydata.attrs:
units = ydata.attrs['units']
else:
units = ''
ytitle = ''.join(['(', units, ')'])
num_y = ydata.shape[0]
barb_step_y = round(num_y / num_barbs_y)
if barb_step_y == 0:
barb_step_y = 1
xdata, ydata = np.meshgrid(xdata, ydata, indexing='ij')
elif pres_field is not None:
# What we will do here is do a nearest-neighbor interpolation
# for each member of the series. Coordinates are time, pressure
pres = self._obj[dsname][pres_field]
u_interp = NearestNDInterpolator((xdata, pres.values), u, rescale=True)
v_interp = NearestNDInterpolator((xdata, pres.values), v, rescale=True)
barb_step_x = 1
barb_step_y = 1
x_times = pd.date_range(xdata.min(), xdata.max(), periods=num_barbs_x)
if num_barbs_y == 1:
y_levels = pres.mean()
else:
y_levels = np.linspace(np.nanmin(pres), np.nanmax(pres), num_barbs_y)
xdata, ydata = np.meshgrid(x_times, y_levels, indexing='ij')
u = u_interp(xdata, ydata)
v = v_interp(xdata, ydata)
if 'units' in pres.attrs:
units = pres.attrs['units']
else:
units = ''
ytitle = ''.join(['(', units, ')'])
else:
ydata = None
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
if ydata is None:
ydata = np.ones(xdata.shape)
if 'cmap' in kwargs.keys():
map_color = np.sqrt(np.power(u[::barb_step_x], 2) + np.power(v[::barb_step_x], 2))
map_color[np.isnan(map_color)] = 0
ax = self.axes[subplot_index].barbs(
xdata[::barb_step_x],
ydata[::barb_step_x],
u[::barb_step_x],
v[::barb_step_x],
map_color,
**kwargs,
)
plt.colorbar(
ax,
ax=[self.axes[subplot_index]],
label='Wind Speed (' + self._obj[dsname][u_field].attrs['units'] + ')',
)
else:
self.axes[subplot_index].barbs(
xdata[::barb_step_x],
ydata[::barb_step_x],
u[::barb_step_x],
v[::barb_step_x],
**kwargs,
)
self.axes[subplot_index].set_yticks([])
else:
if 'cmap' in kwargs.keys():
map_color = np.sqrt(
np.power(u[::barb_step_x, ::barb_step_y], 2)
+ np.power(v[::barb_step_x, ::barb_step_y], 2)
)
map_color[np.isnan(map_color)] = 0
ax = self.axes[subplot_index].barbs(
xdata[::barb_step_x, ::barb_step_y],
ydata[::barb_step_x, ::barb_step_y],
u[::barb_step_x, ::barb_step_y],
v[::barb_step_x, ::barb_step_y],
map_color,
**kwargs,
)
plt.colorbar(
ax,
ax=[self.axes[subplot_index]],
label='Wind Speed (' + self._obj[dsname][u_field].attrs['units'] + ')',
)
else:
ax = self.axes[subplot_index].barbs(
xdata[::barb_step_x, ::barb_step_y],
ydata[::barb_step_x, ::barb_step_y],
u[::barb_step_x, ::barb_step_y],
v[::barb_step_x, ::barb_step_y],
**kwargs,
)
if day_night_background is True:
self.day_night_background(subplot_index=subplot_index, dsname=dsname)
# Set Title
if set_title is None:
set_title = ' '.join(
[
dsname,
'on',
dt_utils.numpy_to_arm_date(self._obj[dsname].time.values[0]),
]
)
self.axes[subplot_index].set_title(set_title)
# Set YTitle
if 'ytitle' in locals():
self.axes[subplot_index].set_ylabel(ytitle)
# Set X Limit - We want the same time axes for all subplots
time_rng = [xdata.min(), xdata.max()]
self.set_xrng(time_rng, subplot_index)
# Set Y Limit
if hasattr(self, 'yrng'):
# Make sure that the yrng is not just the default
if not np.all(self.yrng[subplot_index] == 0):
self.set_yrng(self.yrng[subplot_index], subplot_index)
else:
if ydata is None:
our_data = xdata
else:
our_data = ydata
if np.isfinite(our_data).any():
if invert_y_axis is False:
yrng = [np.nanmin(our_data), np.nanmax(our_data)]
else:
yrng = [np.nanmax(our_data), np.nanmin(our_data)]
else:
yrng = [0, 1]
self.set_yrng(yrng, subplot_index)
# Set X Format
if len(subplot_index) == 1:
days = self.xrng[subplot_index, 1] - self.xrng[subplot_index, 0]
else:
days = (
self.xrng[subplot_index[0], subplot_index[1], 1]
- self.xrng[subplot_index[0], subplot_index[1], 0]
)
# Put on an xlabel, but only if we are making the bottom-most plot
if subplot_index[0] == self.axes.shape[0] - 1:
self.axes[subplot_index].set_xlabel('Time [UTC]')
myFmt = common.get_date_format(days)
self.axes[subplot_index].xaxis.set_major_formatter(myFmt)
return self.axes[subplot_index]
def plot_time_height_xsection_from_1d_data(
self,
data_field,
pres_field,
dsname=None,
subplot_index=(0,),
set_title=None,
day_night_background=False,
num_time_periods=20,
num_y_levels=20,
invert_y_axis=True,
cbar_label=None,
set_shading='auto',
**kwargs,
):
"""
This will plot a time-height cross section from 1D datasets using
nearest neighbor interpolation on a regular time by height grid.
All that is needed are a data variable and a height variable.
Parameters
----------
data_field : str
The name of the field to plot.
pres_field : str
The name of the height or pressure field to plot.
dsname : str or None
The name of the datastream to plot
subplot_index : 2-tuple
The index of the subplot to create the plot on.
set_title : str or None
The title of the plot.
day_night_background : bool
Set to true to plot the day/night background.
num_time_periods : int
Set to determine how many time periods. Setting to None
will do one time period per day.
num_y_levels : int
The number of levels in the y axis to use.
invert_y_axis : bool
Set to true to invert the y-axis (recommended for
pressure coordinates).
cbar_label : str
Option to overwrite default colorbar label.
set_shading : string
Option to set the matplotlib.pcolormesh shading parameter.
Defaults to 'auto'.
**kwargs : keyword arguments
Additional keyword arguments will be passed
into :func:`plt.pcolormesh`
Returns
-------
ax : matplotlib axis handle
The matplotlib axis handle pointing to the plot.
"""
if dsname is None and len(self._obj.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._obj.keys())[0]
dim = list(self._obj[dsname][data_field].dims)
if len(dim) > 1:
raise ValueError(
'plot_time_height_xsection_from_1d_data only '
'supports 1-D datasets. For datasets with 2 or '
'more dimensions use plot().'
)
# Get data and dimensions
data = self._obj[dsname][data_field].values
xdata = self._obj[dsname][dim[0]].values
# What we will do here is do a nearest-neighbor interpolation for each
# member of the series. Coordinates are time, pressure
pres = self._obj[dsname][pres_field]
u_interp = NearestNDInterpolator((xdata, pres.values), data, rescale=True)
# Resample the nearest-neighbor interpolator onto a regular grid of
# num_time_periods x num_y_levels points so it can be drawn with pcolormesh
x_times = pd.date_range(xdata.min(), xdata.max(), periods=num_time_periods)
y_levels = np.linspace(np.nanmin(pres), np.nanmax(pres), num_y_levels)
tdata, ydata = np.meshgrid(x_times, y_levels, indexing='ij')
data = u_interp(tdata, ydata)
ytitle = ''.join(['(', pres.attrs['units'], ')'])
units = data_field + ' (' + self._obj[dsname][data_field].attrs['units'] + ')'
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
mesh = self.axes[subplot_index].pcolormesh(
x_times, y_levels, np.transpose(data), shading=set_shading, **kwargs
)
if day_night_background is True:
self.day_night_background(subplot_index=subplot_index, dsname=dsname)
# Set Title
if set_title is None:
set_title = ' '.join(
[
dsname,
'on',
dt_utils.numpy_to_arm_date(self._obj[dsname].time.values[0]),
]
)
self.axes[subplot_index].set_title(set_title)
# Set YTitle
if 'ytitle' in locals():
self.axes[subplot_index].set_ylabel(ytitle)
# Set X Limit - We want the same time axes for all subplots
time_rng = [x_times[-1], x_times[0]]
self.set_xrng(time_rng, subplot_index)
# Set Y Limit
if hasattr(self, 'yrng'):
# Make sure that the yrng is not just the default
if not np.all(self.yrng[subplot_index] == 0):
self.set_yrng(self.yrng[subplot_index], subplot_index)
else:
if ydata is None:
our_data = data.values
else:
our_data = ydata
if np.isfinite(our_data).any():
if invert_y_axis is False:
yrng = [np.nanmin(our_data), np.nanmax(our_data)]
else:
yrng = [np.nanmax(our_data), np.nanmin(our_data)]
else:
yrng = [0, 1]
self.set_yrng(yrng, subplot_index)
# Set X Format
if len(subplot_index) == 1:
days = self.xrng[subplot_index, 1] - self.xrng[subplot_index, 0]
else:
days = (
self.xrng[subplot_index[0], subplot_index[1], 1]
- self.xrng[subplot_index[0], subplot_index[1], 0]
)
# Put on an xlabel, but only if we are making the bottom-most plot
if subplot_index[0] == self.axes.shape[0] - 1:
self.axes[subplot_index].set_xlabel('Time [UTC]')
if ydata is not None:
if cbar_label is None:
self.add_colorbar(mesh, title=units, subplot_index=subplot_index)
else:
self.add_colorbar(mesh, title=cbar_label, subplot_index=subplot_index)
myFmt = common.get_date_format(days)
self.axes[subplot_index].xaxis.set_major_formatter(myFmt)
return self.axes[subplot_index]
def time_height_scatter(
self,
data_field=None,
dsname=None,
cmap='rainbow',
alt_label=None,
alt_field='alt',
cb_label=None,
**kwargs,
):
"""
Create a time series plot of altitude and data variable with
color also indicating value with a color bar. The color bar is
positioned to serve both as the indicator of the color intensity
and the second y-axis.
Parameters
----------
data_field : str
Name of data field in the object to plot on second y-axis.
dsname : str or None
The name of the datastream to plot.
cmap : str
Colorbar color map to use.
alt_label : str
Altitude first y-axis label to use. If None, will try to use
long_name and units.
alt_field : str
Label for field in the object to plot on first y-axis.
cb_label : str
Colorbar label to use. If not set will try to use
long_name and units.
**kwargs : keyword arguments
Any other keyword arguments that will be passed
into TimeSeriesDisplay.plot module when the figure
is made.
"""
if dsname is None and len(self._obj.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._obj.keys())[0]
# Get data and dimensions
data = self._obj[dsname][data_field]
altitude = self._obj[dsname][alt_field]
dim = list(self._obj[dsname][data_field].dims)
xdata = self._obj[dsname][dim[0]]
if alt_label is None:
try:
alt_label = altitude.attrs['long_name'] + ''.join(
[' (', altitude.attrs['units'], ')']
)
except KeyError:
alt_label = alt_field
if cb_label is None:
try:
cb_label = data.attrs['long_name'] + ''.join([' (', data.attrs['units'], ')'])
except KeyError:
cb_label = data_field
colorbar_map = plt.cm.get_cmap(cmap)
self.fig.subplots_adjust(left=0.1, right=0.86, bottom=0.16, top=0.91)
ax1 = self.plot(alt_field, color='black', **kwargs)
ax1.set_ylabel(alt_label)
ax2 = ax1.twinx()
sc = ax2.scatter(xdata.values, data.values, c=data.values, marker='.', cmap=colorbar_map)
cbaxes = self.fig.add_axes(
[
self.fig.subplotpars.right + 0.02,
self.fig.subplotpars.bottom,
0.02,
self.fig.subplotpars.top - self.fig.subplotpars.bottom,
]
)
cbar = plt.colorbar(sc, cax=cbaxes)
ax2.set_ylim(cbar.mappable.get_clim())
cbar.ax.set_ylabel(cb_label)
ax2.set_yticklabels([])
return self.axes[0]
def qc_flag_block_plot(
self,
data_field=None,
dsname=None,
subplot_index=(0,),
time_rng=None,
assessment_color=None,
edgecolor='face',
set_shading='auto',
**kwargs,
):
"""
Create a time series plot of embedded quality control values
using broken barh plotting.
Parameters
----------
data_field : str
Name of data field in the object to plot corresponding quality
control.
dsname : None or str
If there is more than one datastream in the display object the
name of the datastream needs to be specified. If set to None and
there is only one datastream ACT will use the sole datastream
in the object.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to place the plot on.
time_rng : tuple or list
List or tuple with (min, max) values to set the x-axis range limits.
assessment_color : dict
            Dictionary lookup to override the default assessment-to-color mapping.
            Make sure the assessment word is spelled with the correct case.
edgecolor : str or list
Color name, list of color names or 'face' as defined in matplotlib.axes.Axes.broken_barh
set_shading : string
            Option to set the matplotlib.pcolormesh shading parameter.
            Defaults to 'auto'.
**kwargs : keyword arguments
The keyword arguments for :func:`plt.broken_barh`.
"""
# Color to plot associated with assessment.
color_lookup = {
'Bad': 'red',
'Incorrect': 'red',
'Indeterminate': 'orange',
'Suspect': 'orange',
'Missing': 'darkgray',
'Not Failing': 'green',
'Acceptable': 'green',
}
if assessment_color is not None:
for asses, color in assessment_color.items():
color_lookup[asses] = color
if asses == 'Incorrect':
color_lookup['Bad'] = color
if asses == 'Suspect':
color_lookup['Indeterminate'] = color
# Set up list of test names to use for missing values
missing_val_long_names = [
'Value equal to missing_value*',
'Value set to missing_value*',
'Value is equal to missing_value*',
'Value is set to missing_value*',
]
if dsname is None and len(self._obj.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._obj.keys())[0]
# Set up or get current plot figure
if self.fig is None:
self.fig = plt.figure()
# Set up or get current axes
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
ax = self.axes[subplot_index]
# Set X Limit - We want the same time axes for all subplots
data = self._obj[dsname][data_field]
dim = list(self._obj[dsname][data_field].dims)
xdata = self._obj[dsname][dim[0]]
# Get data and attributes
qc_data_field = self._obj[dsname].qcfilter.check_for_ancillary_qc(
data_field, add_if_missing=False, cleanup=False
)
if qc_data_field is None:
raise ValueError(f'No quality control ancillary variable in Dataset for {data_field}')
flag_masks = self._obj[dsname][qc_data_field].attrs['flag_masks']
flag_meanings = self._obj[dsname][qc_data_field].attrs['flag_meanings']
flag_assessments = self._obj[dsname][qc_data_field].attrs['flag_assessments']
# Get time ranges for green blocks
time_delta = determine_time_delta(xdata.values)
barh_list_green = reduce_time_ranges(xdata.values, time_delta=time_delta, broken_barh=True)
# Set background to gray indicating not available data
ax.set_facecolor('dimgray')
# Check if plotting 2D data vs 1D data. 2D data will be summarized by
# assessment category instead of showing each test.
data_shape = self._obj[dsname][qc_data_field].shape
if len(data_shape) > 1:
cur_assessments = list(set(flag_assessments))
cur_assessments.sort()
cur_assessments.reverse()
qc_data = np.full(data_shape, -1, dtype=np.int16)
plot_colors = []
tick_names = []
index = self._obj[dsname][qc_data_field].values == 0
if index.any():
qc_data[index] = 0
plot_colors.append(color_lookup['Not Failing'])
tick_names.append('Not Failing')
for ii, assess in enumerate(cur_assessments):
if assess not in color_lookup:
color_lookup[assess] = list(mplcolors.CSS4_COLORS.keys())[ii]
ii += 1
assess_data = self._obj[dsname].qcfilter.get_masked_data(
data_field, rm_assessments=assess
)
if assess_data.mask.any():
qc_data[assess_data.mask] = ii
plot_colors.append(color_lookup[assess])
tick_names.append(assess)
            # Overwrite missing data. Not sure if we want to do this because VAPs set
            # the value to missing but the test is set to Bad. This tries to overcome that
            # by looking for a test description that only indicates the values are
            # missing, not that they were set to missing by a test.
missing_test_nums = []
for ii, flag_meaning in enumerate(flag_meanings):
# Check if the bit set is indicating missing data.
for val in missing_val_long_names:
if re_search(val, flag_meaning):
test_num = parse_bit(flag_masks[ii])[0]
missing_test_nums.append(test_num)
assess_data = self._obj[dsname].qcfilter.get_masked_data(
data_field, rm_tests=missing_test_nums
)
if assess_data.mask.any():
qc_data[assess_data.mask] = -1
plot_colors.append(color_lookup['Missing'])
tick_names.append('Missing')
# Create a masked array to allow not plotting where values are missing
qc_data = np.ma.masked_equal(qc_data, -1)
dims = self._obj[dsname][qc_data_field].dims
xvalues = self._obj[dsname][dims[0]].values
yvalues = self._obj[dsname][dims[1]].values
cMap = mplcolors.ListedColormap(plot_colors)
mesh = ax.pcolormesh(
xvalues,
yvalues,
np.transpose(qc_data),
cmap=cMap,
vmin=0,
shading=set_shading,
)
divider = make_axes_locatable(ax)
# Determine correct placement of words on colorbar
tick_nums = (
np.arange(0, len(tick_names) * 2 + 1) / (len(tick_names) * 2) * np.nanmax(qc_data)
)[1::2]
cax = divider.append_axes('bottom', size='5%', pad=0.3)
cbar = self.fig.colorbar(
mesh,
cax=cax,
orientation='horizontal',
spacing='uniform',
ticks=tick_nums,
shrink=0.5,
)
cbar.ax.set_xticklabels(tick_names)
# Set YTitle
dim_name = list(set(self._obj[dsname][qc_data_field].dims) - {'time'})
try:
ytitle = f"{dim_name[0]} ({self._obj[dsname][dim_name[0]].attrs['units']})"
ax.set_ylabel(ytitle)
except KeyError:
pass
# Add which tests were set as text to the plot
unique_values = []
for ii in np.unique(self._obj[dsname][qc_data_field].values):
unique_values.extend(parse_bit(ii))
if len(unique_values) > 0:
unique_values = list(set(unique_values))
unique_values.sort()
unique_values = [str(ii) for ii in unique_values]
self.fig.text(
0.5,
-0.35,
f"QC Tests Tripped: {', '.join(unique_values)}",
transform=ax.transAxes,
horizontalalignment='center',
verticalalignment='center',
fontweight='bold',
)
else:
test_nums = []
for ii, assess in enumerate(flag_assessments):
if assess not in color_lookup:
color_lookup[assess] = list(mplcolors.CSS4_COLORS.keys())[ii]
# Plot green data first.
ax.broken_barh(
barh_list_green,
(ii, ii + 1),
facecolors=color_lookup['Not Failing'],
edgecolor=edgecolor,
**kwargs,
)
# Get test number from flag_mask bitpacked number
test_nums.append(parse_bit(flag_masks[ii]))
# Get masked array data to use mask for finding if/where test is set
data = self._obj[dsname].qcfilter.get_masked_data(
data_field, rm_tests=test_nums[-1]
)
if np.any(data.mask):
# Get time ranges from time and masked data
barh_list = reduce_time_ranges(
xdata.values[data.mask], time_delta=time_delta, broken_barh=True
)
# Check if the bit set is indicating missing data. If so change
# to different plotting color than what is in flag_assessments.
for val in missing_val_long_names:
if re_search(val, flag_meanings[ii]):
assess = 'Missing'
break
# Lay down blocks of tripped tests using correct color
ax.broken_barh(
barh_list,
(ii, ii + 1),
facecolors=color_lookup[assess],
edgecolor=edgecolor,
**kwargs,
)
# Add test description to plot.
ax.text(xdata.values[0], ii + 0.5, ' ' + flag_meanings[ii], va='center')
# Change y ticks to test number
plt.yticks(
[ii + 0.5 for ii in range(0, len(test_nums))],
labels=['Test ' + str(ii[0]) for ii in test_nums],
)
# Set ylimit to number of tests plotted
ax.set_ylim(0, len(flag_assessments))
# Set X Limit - We want the same time axes for all subplots
if not hasattr(self, 'time_rng'):
if time_rng is not None:
self.time_rng = list(time_rng)
else:
self.time_rng = [xdata.min().values, xdata.max().values]
self.set_xrng(self.time_rng, subplot_index)
# Get X format - We want the same time axes for all subplots
if hasattr(self, 'time_fmt'):
ax.xaxis.set_major_formatter(self.time_fmt)
else:
# Set X Format
if len(subplot_index) == 1:
days = self.xrng[subplot_index, 1] - self.xrng[subplot_index, 0]
else:
days = (
self.xrng[subplot_index[0], subplot_index[1], 1]
- self.xrng[subplot_index[0], subplot_index[1], 0]
)
myFmt = common.get_date_format(days)
ax.xaxis.set_major_formatter(myFmt)
self.time_fmt = myFmt
return self.axes[subplot_index]
def fill_between(
self,
field,
dsname=None,
subplot_index=(0,),
set_title=None,
secondary_y=False,
**kwargs,
):
"""
Makes a fill_between plot, based on matplotlib
Parameters
----------
field : str
The name of the field to plot.
dsname : None or str
If there is more than one datastream in the display object the
name of the datastream needs to be specified. If set to None and
there is only one datastream ACT will use the sole datastream
in the object.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
set_title : str
The title for the plot.
secondary_y : boolean
Option to indicate if the data should be plotted on second y-axis.
**kwargs : keyword arguments
The keyword arguments for :func:`plt.plot` (1D timeseries) or
:func:`plt.pcolormesh` (2D timeseries).
Returns
-------
ax : matplotlib axis handle
The matplotlib axis handle of the plot.
"""
if dsname is None and len(self._obj.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._obj.keys())[0]
# Get data and dimensions
data = self._obj[dsname][field]
dim = list(self._obj[dsname][field].dims)
xdata = self._obj[dsname][dim[0]]
if 'units' in data.attrs:
ytitle = ''.join(['(', data.attrs['units'], ')'])
else:
ytitle = field
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
# Set ax to appropriate axis
if secondary_y is False:
ax = self.axes[subplot_index]
else:
ax = self.axes[subplot_index].twinx()
ax.fill_between(xdata.values, data, **kwargs)
# Set X Format
if len(subplot_index) == 1:
days = self.xrng[subplot_index, 1] - self.xrng[subplot_index, 0]
else:
days = (
self.xrng[subplot_index[0], subplot_index[1], 1]
- self.xrng[subplot_index[0], subplot_index[1], 0]
)
myFmt = common.get_date_format(days)
ax.xaxis.set_major_formatter(myFmt)
# Set X format - We want the same time axes for all subplots
if not hasattr(self, 'time_fmt'):
self.time_fmt = myFmt
# Put on an xlabel, but only if we are making the bottom-most plot
if subplot_index[0] == self.axes.shape[0] - 1:
self.axes[subplot_index].set_xlabel('Time [UTC]')
# Set YTitle
ax.set_ylabel(ytitle)
# Set Title
if set_title is None:
set_title = ' '.join(
[
dsname,
field,
'on',
dt_utils.numpy_to_arm_date(self._obj[dsname].time.values[0]),
]
)
if secondary_y is False:
ax.set_title(set_title)
return self.axes[subplot_index]
<|code_end|>
|
HistogramDisplay, plot_stairstep_graph encounters ValueError for range
I'm attempting to generate histograms for some Aerosol Optical Depth data. It is rather sparse, and looks like this:
[nan, nan, 0.04041248, nan, nan, nan, nan, nan, nan, 0.08403611, nan, nan, nan, nan, nan, 0.08443181, 0.05368716, nan, 0.07251927, 0.05726737, nan, 0.04878159, nan, 0.06544831, nan, nan, nan, nan, 0.04621224, nan, nan]
Plotting like so:
```
var_name = 'aod_be_polyfit_415'
nbins = 12
try:
ts_display = act.plotting.HistogramDisplay(
nc_data,
figsize=(19,5.5),
subplot_shape=(1,),
)
except Exception as e:
print(f"{type(e)} error generating histogram of netCDF data for {var_name}:\n{e}")
else:
ts_display.plot_stairstep_graph(var_name, bins=nbins, subplot_index=(0,))
```
It encounters this error:
```
Traceback (most recent call last):
  File "plot_aodbe_daily_avgs.py", line 335, in <module>
    sys.exit(main())
  File "plot_aodbe_daily_avgs.py", line 320, in main
    ts_display.plot_stairstep_graph(var_name, bins=nbins, subplot_index=(0,))
  File "/home/<username>/.local/lib/python3.9/site-packages/act/plotting/histogramdisplay.py", line 386, in plot_stairstep_graph
    my_hist, bins = np.histogram(
  File "<__array_function__ internals>", line 180, in histogram
  File "/home/<username>/.local/lib/python3.9/site-packages/numpy/lib/histograms.py", line 793, in histogram
    bin_edges, uniform_bins = _get_bin_edges(a, bins, range, weights)
  File "/home/<username>/.local/lib/python3.9/site-packages/numpy/lib/histograms.py", line 426, in _get_bin_edges
    first_edge, last_edge = _get_outer_edges(a, range)
  File "/home/<username>/.local/lib/python3.9/site-packages/numpy/lib/histograms.py", line 323, in _get_outer_edges
    raise ValueError(
ValueError: autodetected range of [nan, nan] is not finite
```
Since np.histogram accepts a `range` parameter, I assume this error occurs because it attempts to auto-calculate the range, but the NaNs in the data throw that off. I experimented and found that if I replace the NaNs with e.g. 1.0, the plotting works, but I have to replace all of them.
I imagine there might be various ways to address this.
* Can this be corrected within ACT, so either A) ACT determines the range better than numpy does, ignoring the nans, or B) accepts a range parameter that it will pass to np.histogram?
* Is there a way you might recommend handling this in the data before attempting to plot a histogram? As I noted, I could replace all NaNs with another value (e.g. -9999), but then there would need to be a way to mask out that value... and at that point I might as well just mask the NaNs (a rough sketch of that workaround is below).
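A minimal sketch of that workaround, assuming `nc_data` is the xarray Dataset loaded above and that `aod_be_polyfit_415` varies only along the `time` dimension, would be to drop the NaN samples before handing the data to ACT:
```
import act

var_name = 'aod_be_polyfit_415'

# Keep only the finite samples; the NaNs are what break np.histogram's
# automatic range detection.
nc_subset = nc_data[[var_name]].dropna('time')

display = act.plotting.HistogramDisplay(
    nc_subset,
    figsize=(19, 5.5),
    subplot_shape=(1,),
)
display.plot_stairstep_graph(var_name, bins=12, subplot_index=(0,))
```
That said, having ACT ignore the NaNs internally (or accept a `range` argument to forward to np.histogram) would still be the cleaner fix.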
|
act/plotting/histogramdisplay.py
<|code_start|>""" Module for Histogram Plotting. """
import matplotlib.pyplot as plt
import numpy as np
import xarray as xr
from ..utils import datetime_utils as dt_utils
from .plot import Display
class HistogramDisplay(Display):
"""
This class is used to make histogram plots. It is inherited from Display
and therefore contains all of Display's attributes and methods.
Examples
--------
    To create a HistogramDisplay with 3 rows, simply do:
.. code-block:: python
ds = act.read_netcdf(the_file)
disp = act.plotting.HistogramDisplay(ds, subplot_shape=(3,), figsize=(15, 5))
The HistogramDisplay constructor takes in the same keyword arguments as
plt.subplots. For more information on the plt.subplots keyword arguments,
see the `matplotlib documentation
<https://matplotlib.org/api/_as_gen/matplotlib.pyplot.subplots.html>`_.
If no subplot_shape is provided, then no figure or axis will be created
until add_subplots or plots is called.
"""
def __init__(self, obj, subplot_shape=(1,), ds_name=None, **kwargs):
super().__init__(obj, subplot_shape, ds_name, **kwargs)
def set_xrng(self, xrng, subplot_index=(0,)):
"""
Sets the x range of the plot.
Parameters
----------
xrng : 2 number array
The x limits of the plot.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
"""
if self.axes is None:
raise RuntimeError('set_xrng requires the plot to be displayed.')
if not hasattr(self, 'xrng') and len(self.axes.shape) == 2:
self.xrng = np.zeros((self.axes.shape[0], self.axes.shape[1], 2), dtype='datetime64[D]')
elif not hasattr(self, 'xrng') and len(self.axes.shape) == 1:
self.xrng = np.zeros((self.axes.shape[0], 2), dtype='datetime64[D]')
self.axes[subplot_index].set_xlim(xrng)
self.xrng[subplot_index, :] = np.array(xrng)
def set_yrng(self, yrng, subplot_index=(0,)):
"""
Sets the y range of the plot.
Parameters
----------
yrng : 2 number array
The y limits of the plot.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
"""
if self.axes is None:
raise RuntimeError('set_yrng requires the plot to be displayed.')
if not hasattr(self, 'yrng') and len(self.axes.shape) == 2:
self.yrng = np.zeros((self.axes.shape[0], self.axes.shape[1], 2))
elif not hasattr(self, 'yrng') and len(self.axes.shape) == 1:
self.yrng = np.zeros((self.axes.shape[0], 2))
if yrng[0] == yrng[1]:
yrng[1] = yrng[1] + 1
self.axes[subplot_index].set_ylim(yrng)
self.yrng[subplot_index, :] = yrng
def plot_stacked_bar_graph(
self,
field,
dsname=None,
bins=None,
sortby_field=None,
sortby_bins=None,
subplot_index=(0,),
set_title=None,
density=False,
**kwargs,
):
"""
This procedure will plot a stacked bar graph of a histogram.
Parameters
----------
field : str
The name of the field to take the histogram of.
dsname : str or None
The name of the datastream the field is contained in. Set
to None to let ACT automatically determine this.
bins : array-like or None
The histogram bin boundaries to use. Set to None to use
numpy's default boundaries.
sortby_field : str or None
Set this option to a field name in order to sort the histograms
by a given field parameter. For example, one can sort histograms of CO2
concentration by temperature.
sortby_bins : array-like or None
The bins to sort the histograms by.
subplot_index : tuple
The subplot index to place the plot in
set_title : str
The title of the plot.
density: bool
Set to True to plot a p.d.f. instead of a frequency histogram.
Other keyword arguments will be passed into :func:`matplotlib.pyplot.bar`.
Returns
-------
return_dict : dict
A dictionary containing the plot axis handle, bin boundaries, and
generated histogram.
"""
if dsname is None and len(self._obj.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
+ 'or more datasets in the TimeSeriesDisplay '
+ 'object.'
)
elif dsname is None:
dsname = list(self._obj.keys())[0]
xdata = self._obj[dsname][field]
if 'units' in xdata.attrs:
xtitle = ''.join(['(', xdata.attrs['units'], ')'])
else:
xtitle = field
if sortby_field is not None:
ydata = self._obj[dsname][sortby_field]
if bins is not None and sortby_bins is None and sortby_field is not None:
            # We will default the y direction to have the same # of bins as x
sortby_bins = np.linspace(ydata.values.min(), ydata.values.max(), len(bins))
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
if sortby_field is not None:
if 'units' in ydata.attrs:
ytitle = ''.join(['(', ydata.attrs['units'], ')'])
else:
ytitle = field
if bins is None:
my_hist, x_bins, y_bins = np.histogram2d(
xdata.values.flatten(), ydata.values.flatten(), density=density
)
else:
my_hist, x_bins, y_bins = np.histogram2d(
xdata.values.flatten(),
ydata.values.flatten(),
density=density,
bins=[bins, sortby_bins],
)
x_inds = (x_bins[:-1] + x_bins[1:]) / 2.0
self.axes[subplot_index].bar(
x_inds,
my_hist[:, 0].flatten(),
label=(str(y_bins[0]) + ' to ' + str(y_bins[1])),
**kwargs,
)
for i in range(1, len(y_bins) - 1):
self.axes[subplot_index].bar(
x_inds,
my_hist[:, i].flatten(),
bottom=my_hist[:, i - 1],
label=(str(y_bins[i]) + ' to ' + str(y_bins[i + 1])),
**kwargs,
)
self.axes[subplot_index].legend()
else:
if bins is None:
bmin = np.nanmin(xdata)
bmax = np.nanmax(xdata)
bins = np.arange(bmin, bmax, (bmax - bmin) / 10.0)
my_hist, bins = np.histogram(xdata.values.flatten(), bins=bins, density=density)
x_inds = (bins[:-1] + bins[1:]) / 2.0
self.axes[subplot_index].bar(x_inds, my_hist)
# Set Title
if set_title is None:
set_title = ' '.join(
[
dsname,
field,
'on',
dt_utils.numpy_to_arm_date(self._obj[dsname].time.values[0]),
]
)
self.axes[subplot_index].set_title(set_title)
self.axes[subplot_index].set_ylabel('count')
self.axes[subplot_index].set_xlabel(xtitle)
return_dict = {}
return_dict['plot_handle'] = self.axes[subplot_index]
if 'x_bins' in locals():
return_dict['x_bins'] = x_bins
return_dict['y_bins'] = y_bins
else:
return_dict['bins'] = bins
return_dict['histogram'] = my_hist
return return_dict
def plot_size_distribution(
self, field, bins, time=None, dsname=None, subplot_index=(0,), set_title=None, **kwargs
):
"""
This procedure plots a stairstep plot of a size distribution. This is
useful for plotting size distributions and waveforms.
Parameters
----------
field : str
The name of the field to plot the spectrum from.
bins : str or array-like
The name of the field that stores the bins for the spectra.
time : none or datetime
If None, spectra to plot will be automatically determined.
Otherwise, specify this field for the time period to plot.
dsname : str
The name of the Dataset to plot. Set to None to have
ACT automatically determine this.
subplot_index : tuple
The subplot index to place the plot in.
set_title : str or None
Use this to set the title.
Additional keyword arguments will be passed into :func:`matplotlib.pyplot.step`
Returns
-------
ax : matplotlib axis handle
The matplotlib axis handle referring to the plot.
"""
if dsname is None and len(self._obj.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
+ 'or more datasets in the TimeSeriesDisplay '
+ 'object.'
)
elif dsname is None:
dsname = list(self._obj.keys())[0]
xdata = self._obj[dsname][field]
if isinstance(bins, str):
bins = self._obj[dsname][bins]
else:
bins = xr.DataArray(bins)
if 'units' in bins.attrs:
xtitle = ''.join(['(', bins.attrs['units'], ')'])
else:
xtitle = 'Bin #'
if 'units' in xdata.attrs:
ytitle = ''.join(['(', xdata.attrs['units'], ')'])
else:
ytitle = field
if len(xdata.dims) > 1 and time is None:
raise ValueError(
'Input data has more than one dimension, ' + 'you must specify a time to plot!'
)
elif len(xdata.dims) > 1:
xdata = xdata.sel(time=time, method='nearest')
if len(bins.dims) > 1 or len(bins.values) != len(xdata.values):
raise ValueError(
'Bins must be a one dimensional field whose '
+ 'length is equal to the field length!'
)
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
# Set Title
if set_title is None:
set_title = ' '.join(
[
dsname,
field,
'on',
dt_utils.numpy_to_arm_date(self._obj[dsname].time.values[0]),
]
)
self.axes[subplot_index].set_title(set_title)
self.axes[subplot_index].step(bins.values, xdata.values)
self.axes[subplot_index].set_xlabel(xtitle)
self.axes[subplot_index].set_ylabel(ytitle)
return self.axes[subplot_index]
def plot_stairstep_graph(
self,
field,
dsname=None,
bins=None,
sortby_field=None,
sortby_bins=None,
subplot_index=(0,),
set_title=None,
density=False,
**kwargs,
):
"""
This procedure will plot a stairstep plot of a histogram.
Parameters
----------
field : str
The name of the field to take the histogram of.
dsname : str or None
The name of the datastream the field is contained in. Set
to None to let ACT automatically determine this.
bins : array-like or None
The histogram bin boundaries to use. Set to None to use
numpy's default boundaries.
sortby_field : str or None
Set this option to a field name in order to sort the histograms
by a given field parameter. For example, one can sort histograms of CO2
concentration by temperature.
sortby_bins : array-like or None
The bins to sort the histograms by.
subplot_index : tuple
The subplot index to place the plot in.
set_title : str
The title of the plot.
density : bool
Set to True to plot a p.d.f. instead of a frequency histogram.
Other keyword arguments will be passed into :func:`matplotlib.pyplot.step`.
Returns
-------
return_dict : dict
A dictionary containing the plot axis handle, bin boundaries, and
generated histogram.
"""
if dsname is None and len(self._obj.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
+ 'or more datasets in the TimeSeriesDisplay '
+ 'object.'
)
elif dsname is None:
dsname = list(self._obj.keys())[0]
xdata = self._obj[dsname][field]
if 'units' in xdata.attrs:
xtitle = ''.join(['(', xdata.attrs['units'], ')'])
else:
xtitle = field
if sortby_field is not None:
ydata = self._obj[dsname][sortby_field]
if bins is not None and sortby_bins is None and sortby_field is not None:
            # We will default the y direction to have the same # of bins as x
sortby_bins = np.linspace(ydata.values.min(), ydata.values.max(), len(bins))
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
if sortby_field is not None:
if 'units' in ydata.attrs:
ytitle = ''.join(['(', ydata.attrs['units'], ')'])
else:
ytitle = field
if bins is None:
my_hist, x_bins, y_bins = np.histogram2d(
xdata.values.flatten(), ydata.values.flatten(), density=density
)
else:
my_hist, x_bins, y_bins = np.histogram2d(
xdata.values.flatten(),
ydata.values.flatten(),
density=density,
bins=[bins, sortby_bins],
)
x_inds = (x_bins[:-1] + x_bins[1:]) / 2.0
self.axes[subplot_index].step(
x_inds,
my_hist[:, 0].flatten(),
label=(str(y_bins[0]) + ' to ' + str(y_bins[1])),
**kwargs,
)
for i in range(1, len(y_bins) - 1):
self.axes[subplot_index].step(
x_inds,
my_hist[:, i].flatten(),
label=(str(y_bins[i]) + ' to ' + str(y_bins[i + 1])),
**kwargs,
)
self.axes[subplot_index].legend()
else:
my_hist, bins = np.histogram(xdata.values.flatten(), bins=bins, density=density)
x_inds = (bins[:-1] + bins[1:]) / 2.0
self.axes[subplot_index].step(x_inds, my_hist, **kwargs)
# Set Title
if set_title is None:
set_title = ' '.join(
[
dsname,
field,
'on',
dt_utils.numpy_to_arm_date(self._obj[dsname].time.values[0]),
]
)
self.axes[subplot_index].set_title(set_title)
self.axes[subplot_index].set_ylabel('count')
self.axes[subplot_index].set_xlabel(xtitle)
return_dict = {}
return_dict['plot_handle'] = self.axes[subplot_index]
if 'x_bins' in locals():
return_dict['x_bins'] = x_bins
return_dict['y_bins'] = y_bins
else:
return_dict['bins'] = bins
return_dict['histogram'] = my_hist
return return_dict
def plot_heatmap(
self,
x_field,
y_field,
dsname=None,
x_bins=None,
y_bins=None,
subplot_index=(0,),
set_title=None,
density=False,
set_shading='auto',
**kwargs,
):
"""
This procedure will plot a heatmap of a histogram from 2 variables.
Parameters
----------
x_field : str
The name of the field to take the histogram of on the X axis.
y_field : str
The name of the field to take the histogram of on the Y axis.
dsname : str or None
The name of the datastream the field is contained in. Set
to None to let ACT automatically determine this.
x_bins : array-like or None
The histogram bin boundaries to use for the variable on the X axis.
Set to None to use numpy's default boundaries.
y_bins : array-like or None
The histogram bin boundaries to use for the variable on the Y axis.
Set to None to use numpy's default boundaries.
subplot_index : tuple
The subplot index to place the plot in
set_title : str
The title of the plot.
density : bool
Set to True to plot a p.d.f. instead of a frequency histogram.
set_shading : string
            Option to set the matplotlib.pcolormesh shading parameter.
            Defaults to 'auto'.
Other keyword arguments will be passed into :func:`matplotlib.pyplot.pcolormesh`.
Returns
-------
return_dict : dict
A dictionary containing the plot axis handle, bin boundaries, and
generated histogram.
"""
if dsname is None and len(self._obj.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._obj.keys())[0]
xdata = self._obj[dsname][x_field]
if 'units' in xdata.attrs:
xtitle = ''.join(['(', xdata.attrs['units'], ')'])
else:
xtitle = x_field
ydata = self._obj[dsname][y_field]
if x_bins is not None and y_bins is None:
            # We will default the y direction to have the same # of bins as x
y_bins = np.linspace(ydata.values.min(), ydata.values.max(), len(x_bins))
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
if 'units' in ydata.attrs:
ytitle = ''.join(['(', ydata.attrs['units'], ')'])
else:
ytitle = y_field
if x_bins is None:
my_hist, x_bins, y_bins = np.histogram2d(
xdata.values.flatten(), ydata.values.flatten(), density=density
)
else:
my_hist, x_bins, y_bins = np.histogram2d(
xdata.values.flatten(),
ydata.values.flatten(),
density=density,
bins=[x_bins, y_bins],
)
x_inds = (x_bins[:-1] + x_bins[1:]) / 2.0
y_inds = (y_bins[:-1] + y_bins[1:]) / 2.0
xi, yi = np.meshgrid(x_inds, y_inds, indexing='ij')
mesh = self.axes[subplot_index].pcolormesh(xi, yi, my_hist, shading=set_shading, **kwargs)
# Set Title
if set_title is None:
set_title = ' '.join(
[
dsname,
'on',
dt_utils.numpy_to_arm_date(self._obj[dsname].time.values[0]),
]
)
self.axes[subplot_index].set_title(set_title)
self.axes[subplot_index].set_ylabel(ytitle)
self.axes[subplot_index].set_xlabel(xtitle)
self.add_colorbar(mesh, title='count', subplot_index=subplot_index)
return_dict = {}
return_dict['plot_handle'] = self.axes[subplot_index]
return_dict['x_bins'] = x_bins
return_dict['y_bins'] = y_bins
return_dict['histogram'] = my_hist
return return_dict
<|code_end|>
|
act/plotting/histogramdisplay.py
<|code_start|>""" Module for Histogram Plotting. """
import matplotlib.pyplot as plt
import numpy as np
import xarray as xr
from ..utils import datetime_utils as dt_utils
from .plot import Display
class HistogramDisplay(Display):
"""
This class is used to make histogram plots. It is inherited from Display
and therefore contains all of Display's attributes and methods.
Examples
--------
    To create a HistogramDisplay with 3 rows, simply do:
.. code-block:: python
ds = act.read_netcdf(the_file)
disp = act.plotting.HistogramDisplay(ds, subplot_shape=(3,), figsize=(15, 5))
The HistogramDisplay constructor takes in the same keyword arguments as
plt.subplots. For more information on the plt.subplots keyword arguments,
see the `matplotlib documentation
<https://matplotlib.org/api/_as_gen/matplotlib.pyplot.subplots.html>`_.
If no subplot_shape is provided, then no figure or axis will be created
until add_subplots or plots is called.
"""
def __init__(self, obj, subplot_shape=(1,), ds_name=None, **kwargs):
super().__init__(obj, subplot_shape, ds_name, **kwargs)
def set_xrng(self, xrng, subplot_index=(0,)):
"""
Sets the x range of the plot.
Parameters
----------
xrng : 2 number array
The x limits of the plot.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
"""
if self.axes is None:
raise RuntimeError('set_xrng requires the plot to be displayed.')
if not hasattr(self, 'xrng') and len(self.axes.shape) == 2:
self.xrng = np.zeros((self.axes.shape[0], self.axes.shape[1], 2), dtype='datetime64[D]')
elif not hasattr(self, 'xrng') and len(self.axes.shape) == 1:
self.xrng = np.zeros((self.axes.shape[0], 2), dtype='datetime64[D]')
self.axes[subplot_index].set_xlim(xrng)
self.xrng[subplot_index, :] = np.array(xrng)
def set_yrng(self, yrng, subplot_index=(0,)):
"""
Sets the y range of the plot.
Parameters
----------
yrng : 2 number array
The y limits of the plot.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
"""
if self.axes is None:
raise RuntimeError('set_yrng requires the plot to be displayed.')
if not hasattr(self, 'yrng') and len(self.axes.shape) == 2:
self.yrng = np.zeros((self.axes.shape[0], self.axes.shape[1], 2))
elif not hasattr(self, 'yrng') and len(self.axes.shape) == 1:
self.yrng = np.zeros((self.axes.shape[0], 2))
if yrng[0] == yrng[1]:
yrng[1] = yrng[1] + 1
self.axes[subplot_index].set_ylim(yrng)
self.yrng[subplot_index, :] = yrng
def _get_data(self, dsname, fields):
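        """Return the requested field(s) from the named datastream with any times containing NaNs dropped."""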
if isinstance(fields, str):
fields = [fields]
return self._obj[dsname][fields].dropna('time')
def plot_stacked_bar_graph(
self,
field,
dsname=None,
bins=None,
sortby_field=None,
sortby_bins=None,
subplot_index=(0,),
set_title=None,
density=False,
**kwargs,
):
"""
This procedure will plot a stacked bar graph of a histogram.
Parameters
----------
field : str
The name of the field to take the histogram of.
dsname : str or None
The name of the datastream the field is contained in. Set
to None to let ACT automatically determine this.
bins : array-like or None
The histogram bin boundaries to use. Set to None to use
numpy's default boundaries.
sortby_field : str or None
Set this option to a field name in order to sort the histograms
by a given field parameter. For example, one can sort histograms of CO2
concentration by temperature.
sortby_bins : array-like or None
The bins to sort the histograms by.
subplot_index : tuple
The subplot index to place the plot in
set_title : str
The title of the plot.
density: bool
Set to True to plot a p.d.f. instead of a frequency histogram.
Other keyword arguments will be passed into :func:`matplotlib.pyplot.bar`.
Returns
-------
return_dict : dict
A dictionary containing the plot axis handle, bin boundaries, and
generated histogram.
"""
if dsname is None and len(self._obj.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
+ 'or more datasets in the TimeSeriesDisplay '
+ 'object.'
)
elif dsname is None:
dsname = list(self._obj.keys())[0]
if sortby_field is not None:
ds = self._get_data(dsname, [field, sortby_field])
xdata, ydata = ds[field], ds[sortby_field]
else:
xdata = self._get_data(dsname, field)[field]
if 'units' in xdata.attrs:
xtitle = ''.join(['(', xdata.attrs['units'], ')'])
else:
xtitle = field
if bins is not None and sortby_bins is None and sortby_field is not None:
            # We will default the y direction to have the same # of bins as x
sortby_bins = np.linspace(ydata.values.min(), ydata.values.max(), len(bins))
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
if sortby_field is not None:
if 'units' in ydata.attrs:
ytitle = ''.join(['(', ydata.attrs['units'], ')'])
else:
ytitle = field
if bins is None:
my_hist, x_bins, y_bins = np.histogram2d(
xdata.values.flatten(), ydata.values.flatten(), density=density
)
else:
my_hist, x_bins, y_bins = np.histogram2d(
xdata.values.flatten(),
ydata.values.flatten(),
density=density,
bins=[bins, sortby_bins],
)
x_inds = (x_bins[:-1] + x_bins[1:]) / 2.0
self.axes[subplot_index].bar(
x_inds,
my_hist[:, 0].flatten(),
label=(str(y_bins[0]) + ' to ' + str(y_bins[1])),
**kwargs,
)
for i in range(1, len(y_bins) - 1):
self.axes[subplot_index].bar(
x_inds,
my_hist[:, i].flatten(),
bottom=my_hist[:, i - 1],
label=(str(y_bins[i]) + ' to ' + str(y_bins[i + 1])),
**kwargs,
)
self.axes[subplot_index].legend()
else:
if bins is None:
bmin = np.nanmin(xdata)
bmax = np.nanmax(xdata)
bins = np.arange(bmin, bmax, (bmax - bmin) / 10.0)
my_hist, bins = np.histogram(xdata.values.flatten(), bins=bins, density=density)
x_inds = (bins[:-1] + bins[1:]) / 2.0
self.axes[subplot_index].bar(x_inds, my_hist)
# Set Title
if set_title is None:
set_title = ' '.join(
[
dsname,
field,
'on',
dt_utils.numpy_to_arm_date(self._obj[dsname].time.values[0]),
]
)
self.axes[subplot_index].set_title(set_title)
self.axes[subplot_index].set_ylabel('count')
self.axes[subplot_index].set_xlabel(xtitle)
return_dict = {}
return_dict['plot_handle'] = self.axes[subplot_index]
if 'x_bins' in locals():
return_dict['x_bins'] = x_bins
return_dict['y_bins'] = y_bins
else:
return_dict['bins'] = bins
return_dict['histogram'] = my_hist
return return_dict
def plot_size_distribution(
self, field, bins, time=None, dsname=None, subplot_index=(0,), set_title=None, **kwargs
):
"""
This procedure plots a stairstep plot of a size distribution. This is
useful for plotting size distributions and waveforms.
Parameters
----------
field : str
The name of the field to plot the spectrum from.
bins : str or array-like
The name of the field that stores the bins for the spectra.
time : none or datetime
If None, spectra to plot will be automatically determined.
Otherwise, specify this field for the time period to plot.
dsname : str
The name of the Dataset to plot. Set to None to have
ACT automatically determine this.
subplot_index : tuple
The subplot index to place the plot in.
set_title : str or None
Use this to set the title.
Additional keyword arguments will be passed into :func:`matplotlib.pyplot.step`
Returns
-------
ax : matplotlib axis handle
The matplotlib axis handle referring to the plot.
"""
if dsname is None and len(self._obj.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
+ 'or more datasets in the TimeSeriesDisplay '
+ 'object.'
)
elif dsname is None:
dsname = list(self._obj.keys())[0]
xdata = self._get_data(dsname, field)[field]
if isinstance(bins, str):
bins = self._obj[dsname][bins]
else:
bins = xr.DataArray(bins)
if 'units' in bins.attrs:
xtitle = ''.join(['(', bins.attrs['units'], ')'])
else:
xtitle = 'Bin #'
if 'units' in xdata.attrs:
ytitle = ''.join(['(', xdata.attrs['units'], ')'])
else:
ytitle = field
if len(xdata.dims) > 1 and time is None:
raise ValueError(
'Input data has more than one dimension, ' + 'you must specify a time to plot!'
)
elif len(xdata.dims) > 1:
xdata = xdata.sel(time=time, method='nearest')
if len(bins.dims) > 1 or len(bins.values) != len(xdata.values):
raise ValueError(
'Bins must be a one dimensional field whose '
+ 'length is equal to the field length!'
)
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
# Set Title
if set_title is None:
set_title = ' '.join(
[
dsname,
field,
'on',
dt_utils.numpy_to_arm_date(self._obj[dsname].time.values[0]),
]
)
self.axes[subplot_index].set_title(set_title)
self.axes[subplot_index].step(bins.values, xdata.values)
self.axes[subplot_index].set_xlabel(xtitle)
self.axes[subplot_index].set_ylabel(ytitle)
return self.axes[subplot_index]
def plot_stairstep_graph(
self,
field,
dsname=None,
bins=None,
sortby_field=None,
sortby_bins=None,
subplot_index=(0,),
set_title=None,
density=False,
**kwargs,
):
"""
This procedure will plot a stairstep plot of a histogram.
Parameters
----------
field : str
The name of the field to take the histogram of.
dsname : str or None
The name of the datastream the field is contained in. Set
to None to let ACT automatically determine this.
bins : array-like or None
The histogram bin boundaries to use. Set to None to use
numpy's default boundaries.
sortby_field : str or None
Set this option to a field name in order to sort the histograms
by a given field parameter. For example, one can sort histograms of CO2
concentration by temperature.
sortby_bins : array-like or None
The bins to sort the histograms by.
subplot_index : tuple
The subplot index to place the plot in.
set_title : str
The title of the plot.
density : bool
Set to True to plot a p.d.f. instead of a frequency histogram.
Other keyword arguments will be passed into :func:`matplotlib.pyplot.step`.
Returns
-------
return_dict : dict
A dictionary containing the plot axis handle, bin boundaries, and
generated histogram.
"""
if dsname is None and len(self._obj.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
+ 'or more datasets in the TimeSeriesDisplay '
+ 'object.'
)
elif dsname is None:
dsname = list(self._obj.keys())[0]
xdata = self._get_data(dsname, field)[field]
if 'units' in xdata.attrs:
xtitle = ''.join(['(', xdata.attrs['units'], ')'])
else:
xtitle = field
if sortby_field is not None:
ydata = self._obj[dsname][sortby_field]
if bins is not None and sortby_bins is None and sortby_field is not None:
            # We will default the y direction to have the same # of bins as x
sortby_bins = np.linspace(ydata.values.min(), ydata.values.max(), len(bins))
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
if sortby_field is not None:
if 'units' in ydata.attrs:
ytitle = ''.join(['(', ydata.attrs['units'], ')'])
else:
ytitle = field
if bins is None:
my_hist, x_bins, y_bins = np.histogram2d(
xdata.values.flatten(), ydata.values.flatten(), density=density
)
else:
my_hist, x_bins, y_bins = np.histogram2d(
xdata.values.flatten(),
ydata.values.flatten(),
density=density,
bins=[bins, sortby_bins],
)
x_inds = (x_bins[:-1] + x_bins[1:]) / 2.0
self.axes[subplot_index].step(
x_inds,
my_hist[:, 0].flatten(),
label=(str(y_bins[0]) + ' to ' + str(y_bins[1])),
**kwargs,
)
for i in range(1, len(y_bins) - 1):
self.axes[subplot_index].step(
x_inds,
my_hist[:, i].flatten(),
label=(str(y_bins[i]) + ' to ' + str(y_bins[i + 1])),
**kwargs,
)
self.axes[subplot_index].legend()
else:
my_hist, bins = np.histogram(xdata.values.flatten(), bins=bins, density=density)
x_inds = (bins[:-1] + bins[1:]) / 2.0
self.axes[subplot_index].step(x_inds, my_hist, **kwargs)
# Set Title
if set_title is None:
set_title = ' '.join(
[
dsname,
field,
'on',
dt_utils.numpy_to_arm_date(self._obj[dsname].time.values[0]),
]
)
self.axes[subplot_index].set_title(set_title)
self.axes[subplot_index].set_ylabel('count')
self.axes[subplot_index].set_xlabel(xtitle)
return_dict = {}
return_dict['plot_handle'] = self.axes[subplot_index]
if 'x_bins' in locals():
return_dict['x_bins'] = x_bins
return_dict['y_bins'] = y_bins
else:
return_dict['bins'] = bins
return_dict['histogram'] = my_hist
return return_dict
def plot_heatmap(
self,
x_field,
y_field,
dsname=None,
x_bins=None,
y_bins=None,
subplot_index=(0,),
set_title=None,
density=False,
set_shading='auto',
**kwargs,
):
"""
This procedure will plot a heatmap of a histogram from 2 variables.
Parameters
----------
x_field : str
The name of the field to take the histogram of on the X axis.
y_field : str
The name of the field to take the histogram of on the Y axis.
dsname : str or None
The name of the datastream the field is contained in. Set
to None to let ACT automatically determine this.
x_bins : array-like or None
The histogram bin boundaries to use for the variable on the X axis.
Set to None to use numpy's default boundaries.
y_bins : array-like or None
The histogram bin boundaries to use for the variable on the Y axis.
Set to None to use numpy's default boundaries.
subplot_index : tuple
The subplot index to place the plot in
set_title : str
The title of the plot.
density : bool
Set to True to plot a p.d.f. instead of a frequency histogram.
set_shading : string
            Option to set the matplotlib.pcolormesh shading parameter.
            Defaults to 'auto'.
Other keyword arguments will be passed into :func:`matplotlib.pyplot.pcolormesh`.
Returns
-------
return_dict : dict
A dictionary containing the plot axis handle, bin boundaries, and
generated histogram.
"""
if dsname is None and len(self._obj.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._obj.keys())[0]
ds = self._get_data(dsname, [x_field, y_field])
xdata, ydata = ds[x_field], ds[y_field]
if 'units' in xdata.attrs:
xtitle = ''.join(['(', xdata.attrs['units'], ')'])
else:
xtitle = x_field
if x_bins is not None and y_bins is None:
            # We will default the y direction to have the same # of bins as x
y_bins = np.linspace(ydata.values.min(), ydata.values.max(), len(x_bins))
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
if 'units' in ydata.attrs:
ytitle = ''.join(['(', ydata.attrs['units'], ')'])
else:
ytitle = y_field
if x_bins is None:
my_hist, x_bins, y_bins = np.histogram2d(
xdata.values.flatten(), ydata.values.flatten(), density=density
)
else:
my_hist, x_bins, y_bins = np.histogram2d(
xdata.values.flatten(),
ydata.values.flatten(),
density=density,
bins=[x_bins, y_bins],
)
x_inds = (x_bins[:-1] + x_bins[1:]) / 2.0
y_inds = (y_bins[:-1] + y_bins[1:]) / 2.0
xi, yi = np.meshgrid(x_inds, y_inds, indexing='ij')
mesh = self.axes[subplot_index].pcolormesh(xi, yi, my_hist, shading=set_shading, **kwargs)
# Set Title
if set_title is None:
set_title = ' '.join(
[
dsname,
'on',
dt_utils.numpy_to_arm_date(self._obj[dsname].time.values[0]),
]
)
self.axes[subplot_index].set_title(set_title)
self.axes[subplot_index].set_ylabel(ytitle)
self.axes[subplot_index].set_xlabel(xtitle)
self.add_colorbar(mesh, title='count', subplot_index=subplot_index)
return_dict = {}
return_dict['plot_handle'] = self.axes[subplot_index]
return_dict['x_bins'] = x_bins
return_dict['y_bins'] = y_bins
return_dict['histogram'] = my_hist
return return_dict
<|code_end|>
|
Make the NOAA PSL reader more generic
The current reader only works with wind profiler data from the CTD site (see https://github.com/ARM-DOE/ACT/blob/main/act/io/noaapsl.py#L36)... we need to make this more generic to support other sites, and other data types from this site, since the files are similar in structure and the reader's name, "NOAA PSL", covers a wide range of data. A rough sketch of a more generic table-detection approach is below.
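One possible way to avoid hard-coding the `' CTD'` marker, assuming each data block in these files begins with the same site identifier that appears on the first line of the file (this is a sketch only, not the implemented fix), is to read that identifier and use it to locate the blocks:
```
import numpy as np
import pandas as pd

df = pd.read_csv(filename, header=None)

# The first line of the file holds the site identifier (e.g. 'CTD').
site = df[0][0].strip()

# Every data block starts with a line containing just that identifier.
idx = np.where(df[0].str.strip() == site)
```
The rest of the parsing could then stay largely the same, keyed off `idx` instead of the hard-coded site string.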
|
act/io/__init__.py
<|code_start|>"""
This module contains procedures for reading and writing various ARM datasets.
"""
from .armfiles import WriteDataset, check_arm_standards, create_obj_from_arm_dod, read_netcdf
from .csvfiles import read_csv
from .mpl import proc_sigma_mplv5_read, read_sigma_mplv5
from .noaagml import (
read_gml,
read_gml_co2,
read_gml_halo,
read_gml_met,
read_gml_ozone,
read_gml_radiation,
)
from .noaapsl import read_psl_wind_profiler
from .pysp2 import read_hk_file, read_sp2, read_sp2_dat
<|code_end|>
act/io/noaapsl.py
<|code_start|>"""
Modules for reading in NOAA PSL data.
"""
import datetime as dt
import numpy as np
import pandas as pd
import xarray as xr
def read_psl_wind_profiler(filename, transpose=True):
"""
Returns `xarray.Dataset` with stored data and metadata from a user-defined
NOAA PSL wind profiler file.
Parameters
----------
filename : str
Name of file(s) to read.
transpose : bool
True to transpose the data.
Return
------
obj_low : Xarray.dataset
Standard Xarray dataset with the data for low mode
obj_high : Xarray.dataset
Standard Xarray dataset with the data for high mode.
"""
# read file with pandas for preparation.
df = pd.read_csv(filename, header=None)
# Get location of where each table begins
index_list = df[0] == ' CTD'
idx = np.where(index_list)
# Get header of each column of data.
column_list = list(df.loc[9][0].split())
beam_vars = ['RAD', 'CNT', 'SNR', 'QC']
for i, c in enumerate(column_list):
if c in beam_vars:
if column_list.count(c) > 2:
column_list[i] = c + '1'
elif column_list.count(c) > 1:
column_list[i] = c + '2'
elif column_list.count(c) > 0:
column_list[i] = c + '3'
# Loop through column data only which appears after 10 lines of metadata.
# Year, Month, day, hour, minute, second, utc offset
low = []
hi = []
for i in range(idx[0].shape[0] - 1):
# index each table by using the idx of when CTD appears.
        # str.split is used since 2 spaces are added to each data point;
        # convert to float.
date_str = df.iloc[idx[0][i] + 3]
date_str = list(filter(None, date_str[0].split(' ')))
date_str = list(map(int, date_str))
# Datetime not taking into account the utc offset yet
time = dt.datetime(
2000 + date_str[0],
date_str[1],
date_str[2],
date_str[3],
date_str[4],
date_str[5],
)
mode = df.iloc[idx[0][i] + 7][0]
mode = int(mode.split(' ')[-1])
df_array = np.array(
df.iloc[idx[0][i] + 10 : idx[0][i + 1] - 1][0].str.split(r'\s{2,}').tolist(),
dtype='float',
)
df_add = pd.DataFrame(df_array, columns=column_list)
df_add = df_add.replace(999999.0, np.nan)
xr_add = df_add.to_xarray()
xr_add = xr_add.swap_dims({'index': 'height'})
xr_add = xr_add.reset_coords('index')
xr_add = xr_add.assign_coords({'time': np.array(time), 'height': xr_add['HT'].values})
if mode < 1000.0:
low.append(xr_add)
else:
hi.append(xr_add)
obj_low = xr.concat(low, 'time')
obj_hi = xr.concat(hi, 'time')
# Adding site information line 1
site_loc = df.iloc[idx[0][0]]
site_list = site_loc.str.split(r'\s{2}').tolist()
site = site_list[0][0].strip()
obj_low.attrs['site_identifier'] = site
obj_hi.attrs['site_identifier'] = site
# Adding data type and revision number line 2.
rev = df.loc[idx[0][0] + 1]
rev_list = rev.str.split(r'\s{3}').tolist()
rev_array = np.array(rev_list[0])
obj_low.attrs['data_type'] = rev_array[0].strip()
obj_hi.attrs['data_type'] = rev_array[0].strip()
obj_low.attrs['revision_number'] = rev_array[1].strip()
obj_hi.attrs['revision_number'] = rev_array[1].strip()
# Adding coordinate attributes line 3.
coords = df.loc[idx[0][0] + 2]
coords_list = coords.str.split(r'\s{2,}').tolist()
coords_list[0].remove('')
coords_array = np.array(coords_list[0], dtype='float32')
obj_low.attrs['latitude'] = np.array([coords_array[0]])
obj_hi.attrs['latitude'] = np.array([coords_array[0]])
obj_low.attrs['longitude'] = np.array([coords_array[1]])
obj_hi.attrs['longitude'] = np.array([coords_array[1]])
obj_low.attrs['altitude'] = np.array([coords_array[2]])
obj_hi.attrs['altitude'] = np.array([coords_array[2]])
# Adding azimuth and elevation line 9
az_el = df.loc[idx[0][0] + 8]
az_el_list = az_el.str.split(r'\s{2,}').tolist()
az_el_list[0].remove('')
az_el_array = np.array(az_el_list[0])
az = []
el = []
for i in az_el_array:
sep = i.split()
az.append(sep[0])
el.append(sep[1])
az_array = np.array(az, dtype='float32')
el_array = np.array(el, dtype='float32')
obj_low.attrs['azimuth'] = az_array
obj_hi.attrs['azimuth'] = az_array
obj_low.attrs['elevation'] = el_array
obj_hi.attrs['elevation'] = el_array
if transpose:
obj_low = obj_low.transpose()
obj_hi = obj_hi.transpose()
return obj_low, obj_hi
<|code_end|>
|
act/io/__init__.py
<|code_start|>"""
This module contains procedures for reading and writing various ARM datasets.
"""
from .armfiles import WriteDataset, check_arm_standards, create_obj_from_arm_dod, read_netcdf
from .csvfiles import read_csv
from .mpl import proc_sigma_mplv5_read, read_sigma_mplv5
from .noaagml import (
read_gml,
read_gml_co2,
read_gml_halo,
read_gml_met,
read_gml_ozone,
read_gml_radiation,
)
from .noaapsl import read_psl_wind_profiler, read_psl_wind_profiler_temperature
from .pysp2 import read_hk_file, read_sp2, read_sp2_dat
<|code_end|>
act/io/noaapsl.py
<|code_start|>"""
Modules for reading in NOAA PSL data.
"""
from datetime import datetime
import fsspec
import numpy as np
import pandas as pd
import xarray as xr
def read_psl_wind_profiler(filename, transpose=True):
"""
Returns `xarray.Dataset` with stored data and metadata from a user-defined
NOAA PSL wind profiler file.
Parameters
----------
filename : str
Name of file(s) to read.
transpose : bool
True to transpose the data.
Return
------
obj_low : Xarray.dataset
Standard Xarray dataset with the data for low mode
obj_high : Xarray.dataset
Standard Xarray dataset with the data for high mode.
"""
# read file with pandas for preparation.
df = pd.read_csv(filename, header=None)
# Get location of where each table begins
index_list = df[0] == ' CTD'
idx = np.where(index_list)
# Get header of each column of data.
column_list = list(df.loc[9][0].split())
beam_vars = ['RAD', 'CNT', 'SNR', 'QC']
for i, c in enumerate(column_list):
if c in beam_vars:
if column_list.count(c) > 2:
column_list[i] = c + '1'
elif column_list.count(c) > 1:
column_list[i] = c + '2'
elif column_list.count(c) > 0:
column_list[i] = c + '3'
# Loop through column data only which appears after 10 lines of metadata.
# Year, Month, day, hour, minute, second, utc offset
low = []
hi = []
for i in range(idx[0].shape[0] - 1):
# index each table by using the idx of when CTD appears.
        # str.split is used since 2 spaces are added to each data point;
        # convert to float.
date_str = df.iloc[idx[0][i] + 3]
date_str = list(filter(None, date_str[0].split(' ')))
date_str = list(map(int, date_str))
# Datetime not taking into account the utc offset yet
time = datetime(
2000 + date_str[0],
date_str[1],
date_str[2],
date_str[3],
date_str[4],
date_str[5],
)
mode = df.iloc[idx[0][i] + 7][0]
mode = int(mode.split(' ')[-1])
df_array = np.array(
df.iloc[idx[0][i] + 10 : idx[0][i + 1] - 1][0].str.split(r'\s{2,}').tolist(),
dtype='float',
)
df_add = pd.DataFrame(df_array, columns=column_list)
df_add = df_add.replace(999999.0, np.nan)
xr_add = df_add.to_xarray()
xr_add = xr_add.swap_dims({'index': 'height'})
xr_add = xr_add.reset_coords('index')
xr_add = xr_add.assign_coords({'time': np.array(time), 'height': xr_add['HT'].values})
if mode < 1000.0:
low.append(xr_add)
else:
hi.append(xr_add)
obj_low = xr.concat(low, 'time')
obj_hi = xr.concat(hi, 'time')
# Adding site information line 1
site_loc = df.iloc[idx[0][0]]
site_list = site_loc.str.split(r'\s{2}').tolist()
site = site_list[0][0].strip()
obj_low.attrs['site_identifier'] = site
obj_hi.attrs['site_identifier'] = site
# Adding data type and revision number line 2.
rev = df.loc[idx[0][0] + 1]
rev_list = rev.str.split(r'\s{3}').tolist()
rev_array = np.array(rev_list[0])
obj_low.attrs['data_type'] = rev_array[0].strip()
obj_hi.attrs['data_type'] = rev_array[0].strip()
obj_low.attrs['revision_number'] = rev_array[1].strip()
obj_hi.attrs['revision_number'] = rev_array[1].strip()
# Adding coordinate attributes line 3.
coords = df.loc[idx[0][0] + 2]
coords_list = coords.str.split(r'\s{2,}').tolist()
coords_list[0].remove('')
coords_array = np.array(coords_list[0], dtype='float32')
obj_low.attrs['latitude'] = np.array([coords_array[0]])
obj_hi.attrs['latitude'] = np.array([coords_array[0]])
obj_low.attrs['longitude'] = np.array([coords_array[1]])
obj_hi.attrs['longitude'] = np.array([coords_array[1]])
obj_low.attrs['altitude'] = np.array([coords_array[2]])
obj_hi.attrs['altitude'] = np.array([coords_array[2]])
# Adding azimuth and elevation line 9
az_el = df.loc[idx[0][0] + 8]
az_el_list = az_el.str.split(r'\s{2,}').tolist()
az_el_list[0].remove('')
az_el_array = np.array(az_el_list[0])
az = []
el = []
for i in az_el_array:
sep = i.split()
az.append(sep[0])
el.append(sep[1])
az_array = np.array(az, dtype='float32')
el_array = np.array(el, dtype='float32')
obj_low.attrs['azimuth'] = az_array
obj_hi.attrs['azimuth'] = az_array
obj_low.attrs['elevation'] = el_array
obj_hi.attrs['elevation'] = el_array
if transpose:
obj_low = obj_low.transpose()
obj_hi = obj_hi.transpose()
return obj_low, obj_hi
def read_psl_wind_profiler_temperature(filepath):
"""
Returns `xarray.Dataset` with stored data and metadata from a user-defined
NOAA PSL wind profiler temperature file.
Parameters
----------
filename : str
Name of file(s) to read.
Return
------
ds : Xarray.dataset
Standard Xarray dataset with the data
"""
# Open the file, read in the lines as a list, and return that list
file = fsspec.open(filepath).open()
lines = file.readlines()
newlist = [x.decode().rstrip()[1:] for x in lines][1:]
# 1 - site
site = newlist[0]
# 2 - datetype
datatype, _, version = filter_list(newlist[1].split(' '))
# 3 - station lat, lon, elevation
latitude, longitude, elevation = filter_list(newlist[2].split(' ')).astype(float)
# 4 - year, month, day, hour, minute, second, utc
time = parse_date_line(newlist[3])
# 5 - Consensus averaging time, number of beams, number of range gates
consensus_average_time, number_of_beams, number_of_range_gates = filter_list(
newlist[4].split(' ')
).astype(int)
    # 7 - number of coherent integrations, number of spectral averages, pulse width, inner pulse period
(
number_coherent_integrations,
number_spectral_averages,
pulse_width,
inner_pulse_period,
) = filter_list(newlist[6].split(' ')).astype(int)
# 8 - full-scale doppler value, delay to first gate, number of gates, spacing of gates
full_scale_doppler, delay_first_gate, number_of_gates, spacing_of_gates = filter_list(
newlist[7].split(' ')
).astype(float)
# 9 - beam azimuth (degrees clockwise from north)
beam_azimuth, beam_elevation = filter_list(newlist[8].split(' ')).astype(float)
# Read in the data table section using pandas
df = pd.read_csv(filepath, skiprows=10, delim_whitespace=True)
# Only read in the number of rows for a given set of gates
df = df.iloc[: int(number_of_gates)]
# Nan values are encoded as 999999 - let's reflect that
df = df.replace(999999.0, np.nan)
# Ensure the height array is stored as a float
df['HT'] = df.HT.astype(float)
# Set the height as an index
df = df.set_index('HT')
# Rename the count and snr columns more usefully
df = df.rename(
columns={
'CNT': 'CNT_T',
'CNT.1': 'CNT_Tc',
'CNT.2': 'CNT_W',
'SNR': 'SNR_T',
'SNR.1': 'SNR_Tc',
'SNR.2': 'SNR_W',
}
)
    # Convert to an xarray dataset
ds = df.to_xarray()
# Add attributes to variables
# Height
ds['HT'].attrs['long_name'] = 'height_above_ground'
ds['HT'].attrs['units'] = 'km'
# Temperature
ds['T'].attrs['long_name'] = 'average_uncorrected_RASS_temperature'
ds['T'].attrs['units'] = 'degC'
ds['Tc'].attrs['long_name'] = 'average_corrected_RASS_temperature'
ds['Tc'].attrs['units'] = 'degC'
# Vertical motion (w)
ds['W'].attrs['long_name'] = 'average_vertical_wind'
ds['W'].attrs['units'] = 'm/s'
# Add time to our dataset
ds['time'] = time
# Add in our additional attributes
ds.attrs['site_identifier'] = site
ds.attrs['latitude'] = latitude
ds.attrs['longitude'] = longitude
ds.attrs['elevation'] = elevation
ds.attrs['beam_azimuth'] = beam_azimuth
ds.attrs['revision_number'] = version
ds.attrs[
'data_description'
] = 'https://psl.noaa.gov/data/obs/data/view_data_type_info.php?SiteID=ctd&DataOperationalID=5855&OperationalID=2371'
ds.attrs['consensus_average_time'] = consensus_average_time
ds.attrs['number_of_beams'] = int(number_of_beams)
ds.attrs['number_of_gates'] = int(number_of_gates)
ds.attrs['number_of_range_gates'] = int(number_of_range_gates)
ds.attrs['number_spectral_averages'] = int(number_spectral_averages)
ds.attrs['pulse_width'] = pulse_width
ds.attrs['inner_pulse_period'] = inner_pulse_period
ds.attrs['full_scale_doppler_value'] = full_scale_doppler
ds.attrs['spacing_of_gates'] = spacing_of_gates
return ds
def filter_list(list_of_strings):
"""
Parses a list of strings, remove empty strings, and return a numpy array
"""
return np.array(list(filter(None, list_of_strings)))
def parse_date_line(list_of_strings):
"""
Parses the date line in PSL files
"""
year, month, day, hour, minute, second, utc_offset = filter_list(
list_of_strings.split(' ')
).astype(int)
year += 2000
return datetime(year, month, day, hour, minute, second)
<|code_end|>
|
ENH: Enable Hour/Minute Level of Searching with ARMLive API
Currently, the hour/minute/second portion of the start and end dates is not passed through to the ARM data access API query, so searches cannot be refined below the day level. This is a problem when working with larger datasets (e.g. X-band radar), where a single day of data can be tens of GB and a user often only wants a small subset.
I put together some new logic in this PR (https://github.com/pangeo-forge/staged-recipes/pull/153) for the Pangeo Forge recipe.
It would be good to bring that logic into ACT and update it so users can run more refined searches.
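A minimal sketch of the idea (the helper name `build_query_url` and the use of `datetime.fromisoformat` are illustrative assumptions, not existing ACT API): keep the time-of-day component when encoding the `start`/`end` parameters instead of truncating them to a date.

```python
from datetime import datetime


def build_query_url(username, token, datastream, startdate, enddate):
    # Hypothetical helper: parse ISO-like date/time strings and keep HH:MM:SS
    # so the ARM Live web service can filter within a single day.
    start_dt = datetime.fromisoformat(startdate)
    end_dt = datetime.fromisoformat(enddate)
    start = '&start=' + start_dt.strftime('%Y-%m-%dT%H:%M:%SZ')
    end = '&end=' + end_dt.strftime('%Y-%m-%dT%H:%M:%SZ')
    return (
        'https://adc.arm.gov/armlive/livedata/query?'
        f'user={username}:{token}&ds={datastream}{start}{end}&wt=json'
    )
```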
|
act/discovery/get_armfiles.py
<|code_start|>"""
Script for downloading data from ARM's Live Data Webservice
"""
import argparse
import json
import os
import sys
try:
from urllib.request import urlopen
except ImportError:
from urllib import urlopen
from act.utils import date_parser
def download_data(username, token, datastream, startdate, enddate, time=None, output=None):
"""
This tool will help users utilize the ARM Live Data Webservice to download
ARM data.
Parameters
----------
username : str
The username to use for logging into the ADC archive.
token : str
The access token for accessing the ADC archive.
datastream : str
The name of the datastream to acquire.
startdate : str
The start date of the data to acquire. Formats accepted are
YYYY-MM-DD, DD.MM.YYYY, DD/MM/YYYY, YYYYMMDD or YYYY/MM/DD.
enddate : str
The end date of the data to acquire. Formats accepted are
YYYY-MM-DD, DD.MM.YYYY, DD/MM/YYYY, YYYYMMDD or YYYY/MM/DD.
time: str or None
The specific time. Format is HHMMSS. Set to None to download all files
in the given date interval.
output : str
The output directory for the data. Set to None to make a folder in the
current working directory with the same name as *datastream* to place
the files in.
Returns
-------
files : list
Returns list of files retrieved
Notes
-----
This programmatic interface allows users to query and automate
machine-to-machine downloads of ARM data. This tool uses a REST URL and
specific parameters (saveData, query), user ID and access token, a
datastream name, a start date, and an end date, and data files matching
the criteria will be returned to the user and downloaded.
By using this web service, users can setup cron jobs and automatically
download data from /data/archive into their workspace. This will also
eliminate the manual step of following a link in an email to download data.
All other data files, which are not on the spinning
disk (on HPSS), will have to go through the regular ordering process.
More information about this REST API and tools can be found on `ARM Live
<https://adc.arm.gov/armlive/#scripts>`_.
To login/register for an access token click `here
<https://adc.arm.gov/armlive/livedata/home>`_.
Author: Michael Giansiracusa
Email: [email protected]
Web Tools Contact: Ranjeet Devarakonda [email protected]
Examples
--------
This code will download the netCDF files from the sgpmetE13.b1 datastream
and place them in a directory named sgpmetE13.b1. The data from 14 Jan to
20 Jan 2017 will be downloaded. Replace *userName* and *XXXXXXXXXXXXXXXX*
with your username and token for ARM Data Discovery. See the Notes for
information on how to obtain a username and token.
.. code-block:: python
act.discovery.download_data(
"userName", "XXXXXXXXXXXXXXXX", "sgpmetE13.b1", "2017-01-14", "2017-01-20"
)
"""
# default start and end are empty
start, end = '', ''
# start and end strings for query_url are constructed
# if the arguments were provided
if startdate:
start = date_parser(startdate, output_format='%Y-%m-%d')
start = f'&start={startdate}'
if enddate:
end = date_parser(enddate, output_format='%Y-%m-%d')
end = f'&end={enddate}'
# build the url to query the web service using the arguments provided
query_url = (
'https://adc.arm.gov/armlive/livedata/query?' + 'user={0}&ds={1}{2}{3}&wt=json'
).format(':'.join([username, token]), datastream, start, end)
# get url response, read the body of the message,
# and decode from bytes type to utf-8 string
response_body = urlopen(query_url).read().decode('utf-8')
# if the response is an html doc, then there was an error with the user
if response_body[1:14] == '!DOCTYPE html':
raise ConnectionRefusedError('Error with user. Check username or token.')
# parse into json object
response_body_json = json.loads(response_body)
# construct output directory
if output:
# output files to directory specified
output_dir = os.path.join(output)
else:
# if no folder given, add datastream folder
# to current working dir to prevent file mix-up
output_dir = os.path.join(os.getcwd(), datastream)
# not testing, response is successful and files were returned
if response_body_json is None:
print('ARM Data Live Webservice does not appear to be functioning')
return []
num_files = len(response_body_json['files'])
file_names = []
if response_body_json['status'] == 'success' and num_files > 0:
for fname in response_body_json['files']:
if time is not None:
if time not in fname:
continue
print(f'[DOWNLOADING] {fname}')
# construct link to web service saveData function
save_data_url = (
'https://adc.arm.gov/armlive/livedata/' + 'saveData?user={0}&file={1}'
).format(':'.join([username, token]), fname)
output_file = os.path.join(output_dir, fname)
# make directory if it doesn't exist
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
# create file and write bytes to file
with open(output_file, 'wb') as open_bytes_file:
open_bytes_file.write(urlopen(save_data_url).read())
file_names.append(output_file)
else:
print(
'No files returned or url status error.\n' 'Check datastream name, start, and end date.'
)
return file_names
<|code_end|>
act/utils/datetime_utils.py
<|code_start|>"""
Module that containing utilities involving datetimes.
"""
import datetime as dt
import warnings
import numpy as np
import pandas as pd
from scipy import stats
def dates_between(sdate, edate):
"""
Ths procedure returns all of the dates between *sdate* and *edate*.
Parameters
----------
sdate : str
The string containing the start date. The string is formatted
YYYYMMDD.
edate : str
The string containing the end date. The string is formatted
YYYYMMDD.
Returns
-------
all_dates : array of datetimes
The array containing the dates between *sdate* and *edate*.
"""
days = dt.datetime.strptime(edate, '%Y%m%d') - dt.datetime.strptime(sdate, '%Y%m%d')
all_dates = [
dt.datetime.strptime(sdate, '%Y%m%d') + dt.timedelta(days=d) for d in range(days.days + 1)
]
return all_dates
def numpy_to_arm_date(_date, returnTime=False):
"""
Given a numpy datetime64, return an ARM standard date (yyyymmdd).
Parameters
----------
date : numpy.datetime64
Numpy datetime64 date.
returnTime : boolean
If set to true, returns time instead of date
Returns
-------
arm_date : string
Returns an arm date.
"""
date = pd.to_datetime(str(_date))
if returnTime is False:
date = date.strftime('%Y%m%d')
else:
date = date.strftime('%H%M%S')
return date
def reduce_time_ranges(time, time_delta=60, broken_barh=False):
"""
Given a time series, this function will return a list of tuples of time
ranges representing the contineous times where no data is detected missing.
Parameters
----------
time : numpy datetime64 array
The numpy array of date time values.
time_delta : int
The number of seconds to use as default time step in time array.
broken_barh : boolean
Option to return start time and duration instead of start time and
end time. This is used with the pyplot.broken_barh() plotting routine.
Returns
-------
time_ranges : list of tuples with 2 numpy datetime64 times
The time range(s) of contineous data.
"""
# Convert integer sections to numpy datetime64
time_delta = np.timedelta64(int(time_delta * 1000), 'ms')
# Make a difference array to find where time difference is great than time_delta
diff = np.diff(time)
dd = np.where(diff > time_delta)[0]
if len(dd) == 0:
return [(time[0], time[-1] - time[0])]
# A add to start and end of array for beginning and end values
dd = np.insert(dd, 0, -1)
dd = np.append(dd, len(time) - 1)
# Create a list of tuples containg time ranges or start time with duration
if broken_barh:
return [
(time[dd[ii] + 1], time[dd[ii + 1]] - time[dd[ii] + 1]) for ii in range(len(dd) - 1)
]
else:
return [(time[dd[ii] + 1], time[dd[ii + 1]]) for ii in range(len(dd) - 1)]
def determine_time_delta(time, default=60):
"""
Returns the most likely time step in seconds by analyzing the difference
in time steps.
Parameters
----------
time : numpy datetime64 array
The numpy array of date time values.
default : int or float
The default number to return if unable to calculate a value.
Returns
-------
time_delta : float
Returns the number of seconds for the most common time step. If can't
calculate a value the default value is returned.
"""
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=RuntimeWarning)
if time.size > 1:
mode = stats.mode(np.diff(time))
time_delta = mode.mode[0]
time_delta = time_delta.astype('timedelta64[s]').astype(float)
else:
time_delta = default
return float(time_delta)
def datetime64_to_datetime(time):
"""
Given a numpy datetime64 array time series, return datetime
(y, m, d, h, m, s)
Parameters
----------
time : numpy datetime64 array, list of numpy datetime64 values or
scalar numpy datetime64. The numpy array of date time values.
Returns
-------
datetime : list
Returns a list of datetimes (y, m, d, h, m, s) from a time series.
YYYY-MM-DD, DD.MM.YYYY, DD/MM/YYYY or YYYYMMDD.
"""
if isinstance(time, (tuple, list)):
time = np.array(time)
if len(time.shape) == 0:
time = np.array([time])
datetime_array = [
dt.datetime.fromtimestamp(
tm.astype('datetime64[ms]').astype('float') / 1000.0, tz=dt.timezone.utc
).replace(tzinfo=None)
for tm in time
]
return datetime_array
def date_parser(date_string, output_format='%Y%m%d',
return_datetime=False):
""" Converts one datetime string to another or to
a datetime object.
Parameters
----------
date_string : str
datetime string to be parsed. Accepted formats are
YYYY-MM-DD, DD.MM.YYYY, DD/MM/YYYY or YYYYMMDD.
output_format : str
Format for datetime.strftime to output datetime string.
return_datetime : bool
If true, returns str as a datetime object.
Default is False.
returns
-------
datetime_str : str
A valid datetime string.
datetime_obj : datetime.datetime
A datetime object.
"""
date_fmts = ['%Y-%m-%d', '%d.%m.%Y',
'%d/%m/%Y', '%Y%m%d', '%Y/%m/%d']
for fmt in date_fmts:
try:
datetime_obj = dt.datetime.strptime(date_string, fmt)
if return_datetime:
return datetime_obj
else:
return datetime_obj.strftime(output_format)
except ValueError:
pass
fmt_strings = ', '.join(date_fmts)
raise ValueError(
'Invalid Date format, please use one of these formats '
+ fmt_strings)
<|code_end|>
|
act/discovery/get_armfiles.py
<|code_start|>"""
Script for downloading data from ARM's Live Data Webservice
"""
import argparse
import json
import os
import sys
try:
from urllib.request import urlopen
except ImportError:
from urllib import urlopen
from act.utils import date_parser
def download_data(username, token, datastream, startdate, enddate, time=None, output=None):
"""
This tool will help users utilize the ARM Live Data Webservice to download
ARM data.
Parameters
----------
username : str
The username to use for logging into the ADC archive.
token : str
The access token for accessing the ADC archive.
datastream : str
The name of the datastream to acquire.
startdate : str
The start date of the data to acquire. Formats accepted are
YYYY-MM-DD, DD.MM.YYYY, DD/MM/YYYY, YYYYMMDD, YYYY/MM/DD or
any of the previous formats with THH:MM:SS added onto the end
(ex. 2020-09-15T12:00:00).
enddate : str
The end date of the data to acquire. Formats accepted are
YYYY-MM-DD, DD.MM.YYYY, DD/MM/YYYY, YYYYMMDD or YYYY/MM/DD, or
any of the previous formats with THH:MM:SS added onto the end
(ex. 2020-09-15T13:00:00).
time: str or None
The specific time. Format is HHMMSS. Set to None to download all files
in the given date interval.
output : str
The output directory for the data. Set to None to make a folder in the
current working directory with the same name as *datastream* to place
the files in.
Returns
-------
files : list
Returns list of files retrieved
Notes
-----
This programmatic interface allows users to query and automate
machine-to-machine downloads of ARM data. This tool uses a REST URL and
specific parameters (saveData, query), user ID and access token, a
datastream name, a start date, and an end date, and data files matching
the criteria will be returned to the user and downloaded.
By using this web service, users can setup cron jobs and automatically
download data from /data/archive into their workspace. This will also
eliminate the manual step of following a link in an email to download data.
All other data files, which are not on the spinning
disk (on HPSS), will have to go through the regular ordering process.
More information about this REST API and tools can be found on `ARM Live
<https://adc.arm.gov/armlive/#scripts>`_.
To login/register for an access token click `here
<https://adc.arm.gov/armlive/livedata/home>`_.
Author: Michael Giansiracusa
Email: [email protected]
Web Tools Contact: Ranjeet Devarakonda [email protected]
Examples
--------
This code will download the netCDF files from the sgpmetE13.b1 datastream
and place them in a directory named sgpmetE13.b1. The data from 14 Jan to
20 Jan 2017 will be downloaded. Replace *userName* and *XXXXXXXXXXXXXXXX*
with your username and token for ARM Data Discovery. See the Notes for
information on how to obtain a username and token.
.. code-block:: python
act.discovery.download_data(
"userName", "XXXXXXXXXXXXXXXX", "sgpmetE13.b1", "2017-01-14", "2017-01-20"
)
"""
# default start and end are empty
start, end = '', ''
# start and end strings for query_url are constructed
# if the arguments were provided
if startdate:
start = date_parser(startdate, output_format='%Y-%m-%dT%H:%M:%SZ')
start = f'&start={startdate}'
if enddate:
end = date_parser(enddate, output_format='%Y-%m-%dT%H:%M:%SZ')
end = f'&end={enddate}'
# build the url to query the web service using the arguments provided
query_url = (
'https://adc.arm.gov/armlive/livedata/query?' + 'user={0}&ds={1}{2}{3}&wt=json'
).format(':'.join([username, token]), datastream, start, end)
# get url response, read the body of the message,
# and decode from bytes type to utf-8 string
response_body = urlopen(query_url).read().decode('utf-8')
# if the response is an html doc, then there was an error with the user
if response_body[1:14] == '!DOCTYPE html':
raise ConnectionRefusedError('Error with user. Check username or token.')
# parse into json object
response_body_json = json.loads(response_body)
# construct output directory
if output:
# output files to directory specified
output_dir = os.path.join(output)
else:
# if no folder given, add datastream folder
# to current working dir to prevent file mix-up
output_dir = os.path.join(os.getcwd(), datastream)
# not testing, response is successful and files were returned
if response_body_json is None:
print('ARM Data Live Webservice does not appear to be functioning')
return []
num_files = len(response_body_json['files'])
file_names = []
if response_body_json['status'] == 'success' and num_files > 0:
for fname in response_body_json['files']:
if time is not None:
if time not in fname:
continue
print(f'[DOWNLOADING] {fname}')
# construct link to web service saveData function
save_data_url = (
'https://adc.arm.gov/armlive/livedata/' + 'saveData?user={0}&file={1}'
).format(':'.join([username, token]), fname)
output_file = os.path.join(output_dir, fname)
# make directory if it doesn't exist
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
# create file and write bytes to file
with open(output_file, 'wb') as open_bytes_file:
open_bytes_file.write(urlopen(save_data_url).read())
file_names.append(output_file)
else:
print(
'No files returned or url status error.\n' 'Check datastream name, start, and end date.'
)
return file_names
<|code_end|>
act/utils/datetime_utils.py
<|code_start|>"""
Module that containing utilities involving datetimes.
"""
import datetime as dt
import warnings
import numpy as np
import pandas as pd
from scipy import stats
def dates_between(sdate, edate):
"""
Ths procedure returns all of the dates between *sdate* and *edate*.
Parameters
----------
sdate : str
The string containing the start date. The string is formatted
YYYYMMDD.
edate : str
The string containing the end date. The string is formatted
YYYYMMDD.
Returns
-------
all_dates : array of datetimes
The array containing the dates between *sdate* and *edate*.
"""
days = dt.datetime.strptime(edate, '%Y%m%d') - dt.datetime.strptime(sdate, '%Y%m%d')
all_dates = [
dt.datetime.strptime(sdate, '%Y%m%d') + dt.timedelta(days=d) for d in range(days.days + 1)
]
return all_dates
def numpy_to_arm_date(_date, returnTime=False):
"""
Given a numpy datetime64, return an ARM standard date (yyyymmdd).
Parameters
----------
date : numpy.datetime64
Numpy datetime64 date.
returnTime : boolean
If set to true, returns time instead of date
Returns
-------
arm_date : string
Returns an arm date.
"""
date = pd.to_datetime(str(_date))
if returnTime is False:
date = date.strftime('%Y%m%d')
else:
date = date.strftime('%H%M%S')
return date
def reduce_time_ranges(time, time_delta=60, broken_barh=False):
"""
Given a time series, this function will return a list of tuples of time
ranges representing the contineous times where no data is detected missing.
Parameters
----------
time : numpy datetime64 array
The numpy array of date time values.
time_delta : int
The number of seconds to use as default time step in time array.
broken_barh : boolean
Option to return start time and duration instead of start time and
end time. This is used with the pyplot.broken_barh() plotting routine.
Returns
-------
time_ranges : list of tuples with 2 numpy datetime64 times
The time range(s) of contineous data.
"""
# Convert integer sections to numpy datetime64
time_delta = np.timedelta64(int(time_delta * 1000), 'ms')
# Make a difference array to find where time difference is great than time_delta
diff = np.diff(time)
dd = np.where(diff > time_delta)[0]
if len(dd) == 0:
return [(time[0], time[-1] - time[0])]
# A add to start and end of array for beginning and end values
dd = np.insert(dd, 0, -1)
dd = np.append(dd, len(time) - 1)
# Create a list of tuples containg time ranges or start time with duration
if broken_barh:
return [
(time[dd[ii] + 1], time[dd[ii + 1]] - time[dd[ii] + 1]) for ii in range(len(dd) - 1)
]
else:
return [(time[dd[ii] + 1], time[dd[ii + 1]]) for ii in range(len(dd) - 1)]
def determine_time_delta(time, default=60):
"""
Returns the most likely time step in seconds by analyzing the difference
in time steps.
Parameters
----------
time : numpy datetime64 array
The numpy array of date time values.
default : int or float
The default number to return if unable to calculate a value.
Returns
-------
time_delta : float
Returns the number of seconds for the most common time step. If can't
calculate a value the default value is returned.
"""
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=RuntimeWarning)
if time.size > 1:
mode = stats.mode(np.diff(time))
time_delta = mode.mode[0]
time_delta = time_delta.astype('timedelta64[s]').astype(float)
else:
time_delta = default
return float(time_delta)
def datetime64_to_datetime(time):
"""
Given a numpy datetime64 array time series, return datetime
(y, m, d, h, m, s)
Parameters
----------
time : numpy datetime64 array, list of numpy datetime64 values or
scalar numpy datetime64. The numpy array of date time values.
Returns
-------
datetime : list
Returns a list of datetimes (y, m, d, h, m, s) from a time series.
YYYY-MM-DD, DD.MM.YYYY, DD/MM/YYYY or YYYYMMDD.
"""
if isinstance(time, (tuple, list)):
time = np.array(time)
if len(time.shape) == 0:
time = np.array([time])
datetime_array = [
dt.datetime.fromtimestamp(
tm.astype('datetime64[ms]').astype('float') / 1000.0, tz=dt.timezone.utc
).replace(tzinfo=None)
for tm in time
]
return datetime_array
def date_parser(date_string, output_format='%Y%m%d', return_datetime=False):
"""Converts one datetime string to another or to
a datetime object.
Parameters
----------
date_string : str
datetime string to be parsed. Accepted formats are
YYYY-MM-DD, DD.MM.YYYY, DD/MM/YYYY or YYYYMMDD.
output_format : str
Format for datetime.strftime to output datetime string.
return_datetime : bool
If true, returns str as a datetime object.
Default is False.
returns
-------
datetime_str : str
A valid datetime string.
datetime_obj : datetime.datetime
A datetime object.
"""
date_fmts = [
'%Y-%m-%d',
'%d.%m.%Y',
'%d/%m/%Y',
'%Y%m%d',
'%Y/%m/%d',
'%Y-%m-%dT%H%M%S',
'%d.%m.%YT%H%M%S',
'%d/%m/%YT%H%M%S',
'%Y%m%dT%H%M%S',
'%Y/%m/%dT%H%M%S',
]
for fmt in date_fmts:
try:
datetime_obj = dt.datetime.strptime(date_string, fmt)
if return_datetime:
return datetime_obj
else:
return datetime_obj.strftime(output_format)
except ValueError:
pass
fmt_strings = ', '.join(date_fmts)
raise ValueError('Invalid Date format, please use one of these formats ' + fmt_strings)
<|code_end|>
|
read_psl_wind_profiler_temperature not reading full file
It looks like the new function for reading NOAA PSL temperature data is not reading the full file. For instance, the file linked below contains two times, 172536 and 175538 UTC, but only the first one is being read in. We need to update the reader to read in all times in the file.
https://downloads.psl.noaa.gov/psd2/data/realtime/Radar449/WwTempSubHourly/ctd/2022/196/ctd22196.17t
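One possible approach (a sketch, assuming each record in the file is delimited by a line containing `$`, as in the linked ctd file): split the decoded lines into per-record blocks, parse each block with the existing single-record logic, and concatenate the results along time.

```python
from itertools import groupby


def split_psl_records(lines):
    # Sketch: group decoded text lines into per-timestamp record blocks,
    # dropping the '$' separator lines themselves.
    records = []
    for is_sep, chunk in groupby(lines, key=lambda line: line.strip() == '$'):
        if not is_sep:
            records.append(list(chunk))
    return records

# Each block could then be parsed like the current single-record logic and the
# resulting datasets combined with xr.concat(datasets, dim='time').
```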
|
act/discovery/get_armfiles.py
<|code_start|>"""
Script for downloading data from ARM's Live Data Webservice
"""
import argparse
import json
import os
import sys
try:
from urllib.request import urlopen
except ImportError:
from urllib import urlopen
from act.utils import date_parser
def download_data(username, token, datastream, startdate, enddate, time=None, output=None):
"""
This tool will help users utilize the ARM Live Data Webservice to download
ARM data.
Parameters
----------
username : str
The username to use for logging into the ADC archive.
token : str
The access token for accessing the ADC archive.
datastream : str
The name of the datastream to acquire.
startdate : str
The start date of the data to acquire. Formats accepted are
YYYY-MM-DD, DD.MM.YYYY, DD/MM/YYYY, YYYYMMDD, YYYY/MM/DD or
any of the previous formats with THH:MM:SS added onto the end
(ex. 2020-09-15T12:00:00).
enddate : str
The end date of the data to acquire. Formats accepted are
YYYY-MM-DD, DD.MM.YYYY, DD/MM/YYYY, YYYYMMDD or YYYY/MM/DD, or
any of the previous formats with THH:MM:SS added onto the end
(ex. 2020-09-15T13:00:00).
time: str or None
The specific time. Format is HHMMSS. Set to None to download all files
in the given date interval.
output : str
The output directory for the data. Set to None to make a folder in the
current working directory with the same name as *datastream* to place
the files in.
Returns
-------
files : list
Returns list of files retrieved
Notes
-----
This programmatic interface allows users to query and automate
machine-to-machine downloads of ARM data. This tool uses a REST URL and
specific parameters (saveData, query), user ID and access token, a
datastream name, a start date, and an end date, and data files matching
the criteria will be returned to the user and downloaded.
By using this web service, users can setup cron jobs and automatically
download data from /data/archive into their workspace. This will also
eliminate the manual step of following a link in an email to download data.
All other data files, which are not on the spinning
disk (on HPSS), will have to go through the regular ordering process.
More information about this REST API and tools can be found on `ARM Live
<https://adc.arm.gov/armlive/#scripts>`_.
To login/register for an access token click `here
<https://adc.arm.gov/armlive/livedata/home>`_.
Author: Michael Giansiracusa
Email: [email protected]
Web Tools Contact: Ranjeet Devarakonda [email protected]
Examples
--------
This code will download the netCDF files from the sgpmetE13.b1 datastream
and place them in a directory named sgpmetE13.b1. The data from 14 Jan to
20 Jan 2017 will be downloaded. Replace *userName* and *XXXXXXXXXXXXXXXX*
with your username and token for ARM Data Discovery. See the Notes for
information on how to obtain a username and token.
.. code-block:: python
act.discovery.download_data(
"userName", "XXXXXXXXXXXXXXXX", "sgpmetE13.b1", "2017-01-14", "2017-01-20"
)
"""
# default start and end are empty
start, end = '', ''
# start and end strings for query_url are constructed
# if the arguments were provided
if startdate:
start = date_parser(startdate, output_format='%Y-%m-%dT%H:%M:%SZ')
start = f'&start={startdate}'
if enddate:
end = date_parser(enddate, output_format='%Y-%m-%dT%H:%M:%SZ')
end = f'&end={enddate}'
# build the url to query the web service using the arguments provided
query_url = (
'https://adc.arm.gov/armlive/livedata/query?' + 'user={0}&ds={1}{2}{3}&wt=json'
).format(':'.join([username, token]), datastream, start, end)
# get url response, read the body of the message,
# and decode from bytes type to utf-8 string
response_body = urlopen(query_url).read().decode('utf-8')
# if the response is an html doc, then there was an error with the user
if response_body[1:14] == '!DOCTYPE html':
raise ConnectionRefusedError('Error with user. Check username or token.')
# parse into json object
response_body_json = json.loads(response_body)
# construct output directory
if output:
# output files to directory specified
output_dir = os.path.join(output)
else:
# if no folder given, add datastream folder
# to current working dir to prevent file mix-up
output_dir = os.path.join(os.getcwd(), datastream)
# not testing, response is successful and files were returned
if response_body_json is None:
print('ARM Data Live Webservice does not appear to be functioning')
return []
num_files = len(response_body_json['files'])
file_names = []
if response_body_json['status'] == 'success' and num_files > 0:
for fname in response_body_json['files']:
if time is not None:
if time not in fname:
continue
print(f'[DOWNLOADING] {fname}')
# construct link to web service saveData function
save_data_url = (
'https://adc.arm.gov/armlive/livedata/' + 'saveData?user={0}&file={1}'
).format(':'.join([username, token]), fname)
output_file = os.path.join(output_dir, fname)
# make directory if it doesn't exist
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
# create file and write bytes to file
with open(output_file, 'wb') as open_bytes_file:
open_bytes_file.write(urlopen(save_data_url).read())
file_names.append(output_file)
else:
print(
'No files returned or url status error.\n' 'Check datastream name, start, and end date.'
)
return file_names
<|code_end|>
act/io/noaapsl.py
<|code_start|>"""
Modules for reading in NOAA PSL data.
"""
from datetime import datetime
import fsspec
import numpy as np
import pandas as pd
import xarray as xr
def read_psl_wind_profiler(filename, transpose=True):
"""
Returns `xarray.Dataset` with stored data and metadata from a user-defined
NOAA PSL wind profiler file.
Parameters
----------
filename : str
Name of file(s) to read.
transpose : bool
True to transpose the data.
Return
------
obj_low : Xarray.dataset
Standard Xarray dataset with the data for low mode
obj_high : Xarray.dataset
Standard Xarray dataset with the data for high mode.
"""
# read file with pandas for preparation.
df = pd.read_csv(filename, header=None)
# Get location of where each table begins
index_list = df[0] == ' CTD'
idx = np.where(index_list)
# Get header of each column of data.
column_list = list(df.loc[9][0].split())
beam_vars = ['RAD', 'CNT', 'SNR', 'QC']
for i, c in enumerate(column_list):
if c in beam_vars:
if column_list.count(c) > 2:
column_list[i] = c + '1'
elif column_list.count(c) > 1:
column_list[i] = c + '2'
elif column_list.count(c) > 0:
column_list[i] = c + '3'
# Loop through column data only which appears after 10 lines of metadata.
# Year, Month, day, hour, minute, second, utc offset
low = []
hi = []
for i in range(idx[0].shape[0] - 1):
# index each table by using the idx of when CTD appears.
# str split is use as 2 spaces are added to each data point,
# convert to float.
date_str = df.iloc[idx[0][i] + 3]
date_str = list(filter(None, date_str[0].split(' ')))
date_str = list(map(int, date_str))
# Datetime not taking into account the utc offset yet
time = datetime(
2000 + date_str[0],
date_str[1],
date_str[2],
date_str[3],
date_str[4],
date_str[5],
)
mode = df.iloc[idx[0][i] + 7][0]
mode = int(mode.split(' ')[-1])
df_array = np.array(
df.iloc[idx[0][i] + 10 : idx[0][i + 1] - 1][0].str.split(r'\s{2,}').tolist(),
dtype='float',
)
df_add = pd.DataFrame(df_array, columns=column_list)
df_add = df_add.replace(999999.0, np.nan)
xr_add = df_add.to_xarray()
xr_add = xr_add.swap_dims({'index': 'height'})
xr_add = xr_add.reset_coords('index')
xr_add = xr_add.assign_coords({'time': np.array(time), 'height': xr_add['HT'].values})
if mode < 1000.0:
low.append(xr_add)
else:
hi.append(xr_add)
obj_low = xr.concat(low, 'time')
obj_hi = xr.concat(hi, 'time')
# Adding site information line 1
site_loc = df.iloc[idx[0][0]]
site_list = site_loc.str.split(r'\s{2}').tolist()
site = site_list[0][0].strip()
obj_low.attrs['site_identifier'] = site
obj_hi.attrs['site_identifier'] = site
# Adding data type and revision number line 2.
rev = df.loc[idx[0][0] + 1]
rev_list = rev.str.split(r'\s{3}').tolist()
rev_array = np.array(rev_list[0])
obj_low.attrs['data_type'] = rev_array[0].strip()
obj_hi.attrs['data_type'] = rev_array[0].strip()
obj_low.attrs['revision_number'] = rev_array[1].strip()
obj_hi.attrs['revision_number'] = rev_array[1].strip()
# Adding coordinate attributes line 3.
coords = df.loc[idx[0][0] + 2]
coords_list = coords.str.split(r'\s{2,}').tolist()
coords_list[0].remove('')
coords_array = np.array(coords_list[0], dtype='float32')
obj_low.attrs['latitude'] = np.array([coords_array[0]])
obj_hi.attrs['latitude'] = np.array([coords_array[0]])
obj_low.attrs['longitude'] = np.array([coords_array[1]])
obj_hi.attrs['longitude'] = np.array([coords_array[1]])
obj_low.attrs['altitude'] = np.array([coords_array[2]])
obj_hi.attrs['altitude'] = np.array([coords_array[2]])
# Adding azimuth and elevation line 9
az_el = df.loc[idx[0][0] + 8]
az_el_list = az_el.str.split(r'\s{2,}').tolist()
az_el_list[0].remove('')
az_el_array = np.array(az_el_list[0])
az = []
el = []
for i in az_el_array:
sep = i.split()
az.append(sep[0])
el.append(sep[1])
az_array = np.array(az, dtype='float32')
el_array = np.array(el, dtype='float32')
obj_low.attrs['azimuth'] = az_array
obj_hi.attrs['azimuth'] = az_array
obj_low.attrs['elevation'] = el_array
obj_hi.attrs['elevation'] = el_array
if transpose:
obj_low = obj_low.transpose()
obj_hi = obj_hi.transpose()
return obj_low, obj_hi
def read_psl_wind_profiler_temperature(filepath):
"""
Returns `xarray.Dataset` with stored data and metadata from a user-defined
NOAA PSL wind profiler temperature file.
Parameters
----------
filename : str
Name of file(s) to read.
Return
------
ds : Xarray.dataset
Standard Xarray dataset with the data
"""
# Open the file, read in the lines as a list, and return that list
file = fsspec.open(filepath).open()
lines = file.readlines()
newlist = [x.decode().rstrip()[1:] for x in lines][1:]
# 1 - site
site = newlist[0]
# 2 - datetype
datatype, _, version = filter_list(newlist[1].split(' '))
# 3 - station lat, lon, elevation
latitude, longitude, elevation = filter_list(newlist[2].split(' ')).astype(float)
# 4 - year, month, day, hour, minute, second, utc
time = parse_date_line(newlist[3])
# 5 - Consensus averaging time, number of beams, number of range gates
consensus_average_time, number_of_beams, number_of_range_gates = filter_list(
newlist[4].split(' ')
).astype(int)
# 7 - number of coherent integrations, number of spectral averages, pulse width, indder pulse period
(
number_coherent_integrations,
number_spectral_averages,
pulse_width,
inner_pulse_period,
) = filter_list(newlist[6].split(' ')).astype(int)
# 8 - full-scale doppler value, delay to first gate, number of gates, spacing of gates
full_scale_doppler, delay_first_gate, number_of_gates, spacing_of_gates = filter_list(
newlist[7].split(' ')
).astype(float)
# 9 - beam azimuth (degrees clockwise from north)
beam_azimuth, beam_elevation = filter_list(newlist[8].split(' ')).astype(float)
# Read in the data table section using pandas
df = pd.read_csv(filepath, skiprows=10, delim_whitespace=True)
# Only read in the number of rows for a given set of gates
df = df.iloc[: int(number_of_gates)]
# Nan values are encoded as 999999 - let's reflect that
df = df.replace(999999.0, np.nan)
# Ensure the height array is stored as a float
df['HT'] = df.HT.astype(float)
# Set the height as an index
df = df.set_index('HT')
# Rename the count and snr columns more usefully
df = df.rename(
columns={
'CNT': 'CNT_T',
'CNT.1': 'CNT_Tc',
'CNT.2': 'CNT_W',
'SNR': 'SNR_T',
'SNR.1': 'SNR_Tc',
'SNR.2': 'SNR_W',
}
)
# Convert to an xaray dataset
ds = df.to_xarray()
# Add attributes to variables
# Height
ds['HT'].attrs['long_name'] = 'height_above_ground'
ds['HT'].attrs['units'] = 'km'
# Temperature
ds['T'].attrs['long_name'] = 'average_uncorrected_RASS_temperature'
ds['T'].attrs['units'] = 'degC'
ds['Tc'].attrs['long_name'] = 'average_corrected_RASS_temperature'
ds['Tc'].attrs['units'] = 'degC'
# Vertical motion (w)
ds['W'].attrs['long_name'] = 'average_vertical_wind'
ds['W'].attrs['units'] = 'm/s'
# Add time to our dataset
ds['time'] = time
# Add in our additional attributes
ds.attrs['site_identifier'] = site
ds.attrs['latitude'] = latitude
ds.attrs['longitude'] = longitude
ds.attrs['elevation'] = elevation
ds.attrs['beam_azimuth'] = beam_azimuth
ds.attrs['revision_number'] = version
ds.attrs[
'data_description'
] = 'https://psl.noaa.gov/data/obs/data/view_data_type_info.php?SiteID=ctd&DataOperationalID=5855&OperationalID=2371'
ds.attrs['consensus_average_time'] = consensus_average_time
ds.attrs['number_of_beams'] = int(number_of_beams)
ds.attrs['number_of_gates'] = int(number_of_gates)
ds.attrs['number_of_range_gates'] = int(number_of_range_gates)
ds.attrs['number_spectral_averages'] = int(number_spectral_averages)
ds.attrs['pulse_width'] = pulse_width
ds.attrs['inner_pulse_period'] = inner_pulse_period
ds.attrs['full_scale_doppler_value'] = full_scale_doppler
ds.attrs['spacing_of_gates'] = spacing_of_gates
return ds
def filter_list(list_of_strings):
"""
Parses a list of strings, remove empty strings, and return a numpy array
"""
return np.array(list(filter(None, list_of_strings)))
def parse_date_line(list_of_strings):
"""
Parses the date line in PSL files
"""
year, month, day, hour, minute, second, utc_offset = filter_list(
list_of_strings.split(' ')
).astype(int)
year += 2000
return datetime(year, month, day, hour, minute, second)
<|code_end|>
|
act/discovery/get_armfiles.py
<|code_start|>"""
Script for downloading data from ARM's Live Data Webservice
"""
import argparse
import json
import os
import sys
try:
from urllib.request import urlopen
except ImportError:
from urllib import urlopen
from act.utils import date_parser
def download_data(username, token, datastream, startdate, enddate, time=None, output=None):
"""
This tool will help users utilize the ARM Live Data Webservice to download
ARM data.
Parameters
----------
username : str
The username to use for logging into the ADC archive.
token : str
The access token for accessing the ADC archive.
datastream : str
The name of the datastream to acquire.
startdate : str
The start date of the data to acquire. Formats accepted are
YYYY-MM-DD, DD.MM.YYYY, DD/MM/YYYY, YYYYMMDD, YYYY/MM/DD or
any of the previous formats with THH:MM:SS added onto the end
(ex. 2020-09-15T12:00:00).
enddate : str
The end date of the data to acquire. Formats accepted are
YYYY-MM-DD, DD.MM.YYYY, DD/MM/YYYY, YYYYMMDD or YYYY/MM/DD, or
any of the previous formats with THH:MM:SS added onto the end
(ex. 2020-09-15T13:00:00).
time: str or None
The specific time. Format is HHMMSS. Set to None to download all files
in the given date interval.
output : str
The output directory for the data. Set to None to make a folder in the
current working directory with the same name as *datastream* to place
the files in.
Returns
-------
files : list
Returns list of files retrieved
Notes
-----
This programmatic interface allows users to query and automate
machine-to-machine downloads of ARM data. This tool uses a REST URL and
specific parameters (saveData, query), user ID and access token, a
datastream name, a start date, and an end date, and data files matching
the criteria will be returned to the user and downloaded.
By using this web service, users can setup cron jobs and automatically
download data from /data/archive into their workspace. This will also
eliminate the manual step of following a link in an email to download data.
All other data files, which are not on the spinning
disk (on HPSS), will have to go through the regular ordering process.
More information about this REST API and tools can be found on `ARM Live
<https://adc.arm.gov/armlive/#scripts>`_.
To login/register for an access token click `here
<https://adc.arm.gov/armlive/livedata/home>`_.
Author: Michael Giansiracusa
Email: [email protected]
Web Tools Contact: Ranjeet Devarakonda [email protected]
Examples
--------
This code will download the netCDF files from the sgpmetE13.b1 datastream
and place them in a directory named sgpmetE13.b1. The data from 14 Jan to
20 Jan 2017 will be downloaded. Replace *userName* and *XXXXXXXXXXXXXXXX*
with your username and token for ARM Data Discovery. See the Notes for
information on how to obtain a username and token.
.. code-block:: python
act.discovery.download_data(
"userName", "XXXXXXXXXXXXXXXX", "sgpmetE13.b1", "2017-01-14", "2017-01-20"
)
"""
# default start and end are empty
start, end = '', ''
# start and end strings for query_url are constructed
# if the arguments were provided
if startdate:
start = date_parser(startdate, output_format='%Y-%m-%dT%H:%M:%S.f')[:-3] + 'Z'
start = f'&start={startdate}'
if enddate:
end = date_parser(enddate, output_format='%Y-%m-%dT%H:%M:%SZ.f')[:-3] + 'Z'
end = f'&end={enddate}'
# build the url to query the web service using the arguments provided
query_url = (
'https://adc.arm.gov/armlive/livedata/query?' + 'user={0}&ds={1}{2}{3}&wt=json'
).format(':'.join([username, token]), datastream, start, end)
# get url response, read the body of the message,
# and decode from bytes type to utf-8 string
response_body = urlopen(query_url).read().decode('utf-8')
# if the response is an html doc, then there was an error with the user
if response_body[1:14] == '!DOCTYPE html':
raise ConnectionRefusedError('Error with user. Check username or token.')
# parse into json object
response_body_json = json.loads(response_body)
# construct output directory
if output:
# output files to directory specified
output_dir = os.path.join(output)
else:
# if no folder given, add datastream folder
# to current working dir to prevent file mix-up
output_dir = os.path.join(os.getcwd(), datastream)
# not testing, response is successful and files were returned
if response_body_json is None:
print('ARM Data Live Webservice does not appear to be functioning')
return []
num_files = len(response_body_json['files'])
file_names = []
if response_body_json['status'] == 'success' and num_files > 0:
for fname in response_body_json['files']:
if time is not None:
if time not in fname:
continue
print(f'[DOWNLOADING] {fname}')
# construct link to web service saveData function
save_data_url = (
'https://adc.arm.gov/armlive/livedata/' + 'saveData?user={0}&file={1}'
).format(':'.join([username, token]), fname)
output_file = os.path.join(output_dir, fname)
# make directory if it doesn't exist
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
# create file and write bytes to file
with open(output_file, 'wb') as open_bytes_file:
open_bytes_file.write(urlopen(save_data_url).read())
file_names.append(output_file)
else:
print(
'No files returned or url status error.\n' 'Check datastream name, start, and end date.'
)
return file_names
<|code_end|>
act/io/noaapsl.py
<|code_start|>"""
Modules for reading in NOAA PSL data.
"""
from datetime import datetime
from itertools import groupby
import fsspec
import numpy as np
import pandas as pd
import xarray as xr
def read_psl_wind_profiler(filename, transpose=True):
"""
Returns `xarray.Dataset` with stored data and metadata from a user-defined
NOAA PSL wind profiler file.
Parameters
----------
filename : str
Name of file(s) to read.
transpose : bool
True to transpose the data.
Return
------
obj_low : Xarray.dataset
Standard Xarray dataset with the data for low mode
obj_high : Xarray.dataset
Standard Xarray dataset with the data for high mode.
"""
# read file with pandas for preparation.
df = pd.read_csv(filename, header=None)
# Get location of where each table begins
index_list = df[0] == ' CTD'
idx = np.where(index_list)
# Get header of each column of data.
column_list = list(df.loc[9][0].split())
beam_vars = ['RAD', 'CNT', 'SNR', 'QC']
for i, c in enumerate(column_list):
if c in beam_vars:
if column_list.count(c) > 2:
column_list[i] = c + '1'
elif column_list.count(c) > 1:
column_list[i] = c + '2'
elif column_list.count(c) > 0:
column_list[i] = c + '3'
# Loop through column data only which appears after 10 lines of metadata.
# Year, Month, day, hour, minute, second, utc offset
low = []
hi = []
for i in range(idx[0].shape[0] - 1):
# index each table by using the idx of when CTD appears.
# str split is use as 2 spaces are added to each data point,
# convert to float.
date_str = df.iloc[idx[0][i] + 3]
date_str = list(filter(None, date_str[0].split(' ')))
date_str = list(map(int, date_str))
# Datetime not taking into account the utc offset yet
time = datetime(
2000 + date_str[0],
date_str[1],
date_str[2],
date_str[3],
date_str[4],
date_str[5],
)
mode = df.iloc[idx[0][i] + 7][0]
mode = int(mode.split(' ')[-1])
df_array = np.array(
df.iloc[idx[0][i] + 10 : idx[0][i + 1] - 1][0].str.split(r'\s{2,}').tolist(),
dtype='float',
)
df_add = pd.DataFrame(df_array, columns=column_list)
df_add = df_add.replace(999999.0, np.nan)
xr_add = df_add.to_xarray()
xr_add = xr_add.swap_dims({'index': 'height'})
xr_add = xr_add.reset_coords('index')
xr_add = xr_add.assign_coords({'time': np.array(time), 'height': xr_add['HT'].values})
if mode < 1000.0:
low.append(xr_add)
else:
hi.append(xr_add)
obj_low = xr.concat(low, 'time')
obj_hi = xr.concat(hi, 'time')
# Adding site information line 1
site_loc = df.iloc[idx[0][0]]
site_list = site_loc.str.split(r'\s{2}').tolist()
site = site_list[0][0].strip()
obj_low.attrs['site_identifier'] = site
obj_hi.attrs['site_identifier'] = site
# Adding data type and revision number line 2.
rev = df.loc[idx[0][0] + 1]
rev_list = rev.str.split(r'\s{3}').tolist()
rev_array = np.array(rev_list[0])
obj_low.attrs['data_type'] = rev_array[0].strip()
obj_hi.attrs['data_type'] = rev_array[0].strip()
obj_low.attrs['revision_number'] = rev_array[1].strip()
obj_hi.attrs['revision_number'] = rev_array[1].strip()
# Adding coordinate attributes line 3.
coords = df.loc[idx[0][0] + 2]
coords_list = coords.str.split(r'\s{2,}').tolist()
coords_list[0].remove('')
coords_array = np.array(coords_list[0], dtype='float32')
obj_low.attrs['latitude'] = np.array([coords_array[0]])
obj_hi.attrs['latitude'] = np.array([coords_array[0]])
obj_low.attrs['longitude'] = np.array([coords_array[1]])
obj_hi.attrs['longitude'] = np.array([coords_array[1]])
obj_low.attrs['altitude'] = np.array([coords_array[2]])
obj_hi.attrs['altitude'] = np.array([coords_array[2]])
# Adding azimuth and elevation line 9
az_el = df.loc[idx[0][0] + 8]
az_el_list = az_el.str.split(r'\s{2,}').tolist()
az_el_list[0].remove('')
az_el_array = np.array(az_el_list[0])
az = []
el = []
for i in az_el_array:
sep = i.split()
az.append(sep[0])
el.append(sep[1])
az_array = np.array(az, dtype='float32')
el_array = np.array(el, dtype='float32')
obj_low.attrs['azimuth'] = az_array
obj_hi.attrs['azimuth'] = az_array
obj_low.attrs['elevation'] = el_array
obj_hi.attrs['elevation'] = el_array
if transpose:
obj_low = obj_low.transpose()
obj_hi = obj_hi.transpose()
return obj_low, obj_hi
def read_psl_wind_profiler_temperature(filepath):
"""
Returns `xarray.Dataset` with stored data and metadata from a user-defined
NOAA PSL wind profiler temperature file.
Parameters
----------
filename : str
Name of file(s) to read.
Return
------
ds : Xarray.dataset
Standard Xarray dataset with the data
"""
# Open the file, read in the lines as a list, and return that list
file = fsspec.open(filepath).open()
lines = file.readlines()
lines = [x.decode().rstrip()[:] for x in lines][1:]
# Separate sections based on the $ separator in the file
sections_of_file = (list(g) for _, g in groupby(lines, key='$'.__ne__))
# Count how many lines need to be skipped when reading into pandas
start_line = 0
list_of_datasets = []
for section in sections_of_file:
if section[0] != '$':
list_of_datasets.append(
_parse_psl_temperature_lines(filepath, section, line_offset=start_line)
)
start_line += len(section)
# Merge the resultant datasets together
return xr.concat(list_of_datasets, dim='time').transpose('HT', 'time')
def _parse_psl_temperature_lines(filepath, lines, line_offset=0):
"""
Reads lines related to temperature in a psl file
Parameters
----------
filename : str
Name of file(s) to read.
lines = list
List of strings containing the lines to parse
line_offset = int (default = 0)
Offset to start reading the pandas data table
Returns
-------
ds = xr.Dataset
Xarray dataset with temperature data
"""
# 1 - site
site = lines[0]
# 2 - datetype
datatype, _, version = filter_list(lines[1].split(' '))
# 3 - station lat, lon, elevation
latitude, longitude, elevation = filter_list(lines[2].split(' ')).astype(float)
# 4 - year, month, day, hour, minute, second, utc
time = parse_date_line(lines[3])
# 5 - Consensus averaging time, number of beams, number of range gates
consensus_average_time, number_of_beams, number_of_range_gates = filter_list(
lines[4].split(' ')
).astype(int)
# 7 - number of coherent integrations, number of spectral averages, pulse width, indder pulse period
(
number_coherent_integrations,
number_spectral_averages,
pulse_width,
inner_pulse_period,
) = filter_list(lines[6].split(' ')).astype(int)
# 8 - full-scale doppler value, delay to first gate, number of gates, spacing of gates
full_scale_doppler, delay_first_gate, number_of_gates, spacing_of_gates = filter_list(
lines[7].split(' ')
).astype(float)
# 9 - beam azimuth (degrees clockwise from north)
beam_azimuth, beam_elevation = filter_list(lines[8].split(' ')).astype(float)
# Read in the data table section using pandas
df = pd.read_csv(filepath, skiprows=line_offset + 10, delim_whitespace=True)
# Only read in the number of rows for a given set of gates
df = df.iloc[: int(number_of_gates)]
# Grab a list of valid columns, exept time
columns = set(list(df.columns)) - {'time'}
# Set the data types to be floats
df = df[list(columns)].astype(float)
# Nan values are encoded as 999999 - let's reflect that
df = df.replace(999999.0, np.nan)
# Ensure the height array is stored as a float
df['HT'] = df.HT.astype(float)
# Set the height as an index
df = df.set_index('HT')
# Rename the count and snr columns more usefully
df = df.rename(
columns={
'CNT': 'CNT_T',
'CNT.1': 'CNT_Tc',
'CNT.2': 'CNT_W',
'SNR': 'SNR_T',
'SNR.1': 'SNR_Tc',
'SNR.2': 'SNR_W',
}
)
# Convert to an xaray dataset
ds = df.to_xarray()
# Add attributes to variables
# Height
ds['HT'].attrs['long_name'] = 'height_above_ground'
ds['HT'].attrs['units'] = 'km'
# Temperature
ds['T'].attrs['long_name'] = 'average_uncorrected_RASS_temperature'
ds['T'].attrs['units'] = 'degC'
ds['Tc'].attrs['long_name'] = 'average_corrected_RASS_temperature'
ds['Tc'].attrs['units'] = 'degC'
# Vertical motion (w)
ds['W'].attrs['long_name'] = 'average_vertical_wind'
ds['W'].attrs['units'] = 'm/s'
# Add time to our dataset
ds['time'] = time
# Add in our additional attributes
ds.attrs['site_identifier'] = site
ds.attrs['latitude'] = latitude
ds.attrs['longitude'] = longitude
ds.attrs['elevation'] = elevation
ds.attrs['beam_azimuth'] = beam_azimuth
ds.attrs['revision_number'] = version
ds.attrs[
'data_description'
] = 'https://psl.noaa.gov/data/obs/data/view_data_type_info.php?SiteID=ctd&DataOperationalID=5855&OperationalID=2371'
ds.attrs['consensus_average_time'] = consensus_average_time
ds.attrs['number_of_beams'] = int(number_of_beams)
ds.attrs['number_of_gates'] = int(number_of_gates)
ds.attrs['number_of_range_gates'] = int(number_of_range_gates)
ds.attrs['number_spectral_averages'] = int(number_spectral_averages)
ds.attrs['pulse_width'] = pulse_width
ds.attrs['inner_pulse_period'] = inner_pulse_period
ds.attrs['full_scale_doppler_value'] = full_scale_doppler
ds.attrs['spacing_of_gates'] = spacing_of_gates
return ds
def filter_list(list_of_strings):
"""
Parses a list of strings, remove empty strings, and return a numpy array
"""
return np.array(list(filter(None, list_of_strings)))
def parse_date_line(list_of_strings):
"""
Parses the date line in PSL files
"""
year, month, day, hour, minute, second, utc_offset = filter_list(
list_of_strings.split(' ')
).astype(int)
year += 2000
return datetime(year, month, day, hour, minute, second)
<|code_end|>
|
BUG: Fix QC bug in tests
CI is currently failing on Ubuntu systems; `test_qc_flag_description` compares lists built from Python sets, whose iteration order is not guaranteed, so the assertion below can fail depending on set ordering:
```bash
def test_qc_flag_description():
    """
    This will check if the cleanup() method will correctly convert convert
    flag_#_description to CF flag_masks and flag_meanings.

    """

    ds = read_netcdf(EXAMPLE_CO2FLX4M)
    ds.clean.cleanup()
    qc_var_name = ds.qcfilter.check_for_ancillary_qc(
        'momentum_flux', add_if_missing=False, cleanup=False
    )

    assert isinstance(ds[qc_var_name].attrs['flag_masks'], list)
    assert isinstance(ds[qc_var_name].attrs['flag_meanings'], list)
    assert isinstance(ds[qc_var_name].attrs['flag_assessments'], list)
    assert ds[qc_var_name].attrs['standard_name'] == 'quality_flag'

    assert len(ds[qc_var_name].attrs['flag_masks']) == 9
    unique_flag_assessments = list({'Acceptable', 'Indeterminate', 'Bad'})
>   assert list(set(ds[qc_var_name].attrs['flag_assessments'])) == unique_flag_assessments
E   AssertionError: assert ['Indetermina...table', 'Bad'] == ['Indetermina... 'Acceptable']
E     At index 1 diff: 'Acceptable' != 'Bad'
E     Full diff:
E     - ['Indeterminate', 'Bad', 'Acceptable']
E     ?                   -------
E     + ['Indeterminate', 'Acceptable', 'Bad']
E     ?                    +++++++

act/tests/test_qc.py:814: AssertionError
```
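A likely fix (a sketch, not necessarily the exact patch) is to make the comparison order-insensitive, since the ordering of `list(set(...))` depends on the Python hash seed and platform:

```python
# Compare as sets so element order does not matter
unique_flag_assessments = {'Acceptable', 'Indeterminate', 'Bad'}
assert set(ds[qc_var_name].attrs['flag_assessments']) == unique_flag_assessments

# ...or compare sorted lists
assert sorted(ds[qc_var_name].attrs['flag_assessments']) == sorted(unique_flag_assessments)
```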
|
act/discovery/get_armfiles.py
<|code_start|>"""
Script for downloading data from ARM's Live Data Webservice
"""
import argparse
import json
import os
import sys
try:
from urllib.request import urlopen
except ImportError:
from urllib import urlopen
from act.utils import date_parser
def download_data(username, token, datastream, startdate, enddate, time=None, output=None):
"""
This tool will help users utilize the ARM Live Data Webservice to download
ARM data.
Parameters
----------
username : str
The username to use for logging into the ADC archive.
token : str
The access token for accessing the ADC archive.
datastream : str
The name of the datastream to acquire.
startdate : str
The start date of the data to acquire. Formats accepted are
YYYY-MM-DD, DD.MM.YYYY, DD/MM/YYYY, YYYYMMDD, YYYY/MM/DD or
any of the previous formats with THH:MM:SS added onto the end
(ex. 2020-09-15T12:00:00).
enddate : str
The end date of the data to acquire. Formats accepted are
YYYY-MM-DD, DD.MM.YYYY, DD/MM/YYYY, YYYYMMDD or YYYY/MM/DD, or
any of the previous formats with THH:MM:SS added onto the end
(ex. 2020-09-15T13:00:00).
time: str or None
The specific time. Format is HHMMSS. Set to None to download all files
in the given date interval.
output : str
The output directory for the data. Set to None to make a folder in the
current working directory with the same name as *datastream* to place
the files in.
Returns
-------
files : list
Returns list of files retrieved
Notes
-----
This programmatic interface allows users to query and automate
machine-to-machine downloads of ARM data. This tool uses a REST URL and
specific parameters (saveData, query), user ID and access token, a
datastream name, a start date, and an end date, and data files matching
the criteria will be returned to the user and downloaded.
By using this web service, users can setup cron jobs and automatically
download data from /data/archive into their workspace. This will also
eliminate the manual step of following a link in an email to download data.
All other data files, which are not on the spinning
disk (on HPSS), will have to go through the regular ordering process.
More information about this REST API and tools can be found on `ARM Live
<https://adc.arm.gov/armlive/#scripts>`_.
To login/register for an access token click `here
<https://adc.arm.gov/armlive/livedata/home>`_.
Author: Michael Giansiracusa
Email: [email protected]
Web Tools Contact: Ranjeet Devarakonda [email protected]
Examples
--------
This code will download the netCDF files from the sgpmetE13.b1 datastream
and place them in a directory named sgpmetE13.b1. The data from 14 Jan to
20 Jan 2017 will be downloaded. Replace *userName* and *XXXXXXXXXXXXXXXX*
with your username and token for ARM Data Discovery. See the Notes for
information on how to obtain a username and token.
.. code-block:: python
act.discovery.download_data(
"userName", "XXXXXXXXXXXXXXXX", "sgpmetE13.b1", "2017-01-14", "2017-01-20"
)
"""
# default start and end are empty
start, end = '', ''
# start and end strings for query_url are constructed
# if the arguments were provided
if startdate:
start = date_parser(startdate, output_format='%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'
start = f'&start={start}'
if enddate:
end = date_parser(enddate, output_format='%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'
end = f'&end={end}'
# build the url to query the web service using the arguments provided
query_url = (
'https://adc.arm.gov/armlive/livedata/query?' + 'user={0}&ds={1}{2}{3}&wt=json'
).format(':'.join([username, token]), datastream, start, end)
# get url response, read the body of the message,
# and decode from bytes type to utf-8 string
response_body = urlopen(query_url).read().decode('utf-8')
# if the response is an html doc, then there was an error with the user
if response_body[1:14] == '!DOCTYPE html':
raise ConnectionRefusedError('Error with user. Check username or token.')
# parse into json object
response_body_json = json.loads(response_body)
# construct output directory
if output:
# output files to directory specified
output_dir = os.path.join(output)
else:
# if no folder given, add datastream folder
# to current working dir to prevent file mix-up
output_dir = os.path.join(os.getcwd(), datastream)
# not testing, response is successful and files were returned
if response_body_json is None:
print('ARM Data Live Webservice does not appear to be functioning')
return []
num_files = len(response_body_json['files'])
file_names = []
if response_body_json['status'] == 'success' and num_files > 0:
for fname in response_body_json['files']:
if time is not None:
if time not in fname:
continue
print(f'[DOWNLOADING] {fname}')
# construct link to web service saveData function
save_data_url = (
'https://adc.arm.gov/armlive/livedata/' + 'saveData?user={0}&file={1}'
).format(':'.join([username, token]), fname)
output_file = os.path.join(output_dir, fname)
# make directory if it doesn't exist
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
# create file and write bytes to file
with open(output_file, 'wb') as open_bytes_file:
open_bytes_file.write(urlopen(save_data_url).read())
file_names.append(output_file)
else:
print(
'No files returned or url status error.\n' 'Check datastream name, start, and end date.'
)
return file_names
<|code_end|>
|
act/discovery/get_armfiles.py
<|code_start|>"""
Script for downloading data from ARM's Live Data Webservice
"""
import argparse
import json
import os
import sys
from datetime import timedelta
try:
from urllib.request import urlopen
except ImportError:
from urllib import urlopen
from act.utils import date_parser
def download_data(username, token, datastream, startdate, enddate, time=None, output=None):
"""
This tool will help users utilize the ARM Live Data Webservice to download
ARM data.
Parameters
----------
username : str
The username to use for logging into the ADC archive.
token : str
The access token for accessing the ADC archive.
datastream : str
The name of the datastream to acquire.
startdate : str
The start date of the data to acquire. Formats accepted are
YYYY-MM-DD, DD.MM.YYYY, DD/MM/YYYY, YYYYMMDD, YYYY/MM/DD or
any of the previous formats with THH:MM:SS added onto the end
(ex. 2020-09-15T12:00:00).
enddate : str
The end date of the data to acquire. Formats accepted are
YYYY-MM-DD, DD.MM.YYYY, DD/MM/YYYY, YYYYMMDD or YYYY/MM/DD, or
any of the previous formats with THH:MM:SS added onto the end
(ex. 2020-09-15T13:00:00).
time: str or None
The specific time. Format is HHMMSS. Set to None to download all files
in the given date interval.
output : str
The output directory for the data. Set to None to make a folder in the
current working directory with the same name as *datastream* to place
the files in.
Returns
-------
files : list
Returns list of files retrieved
Notes
-----
This programmatic interface allows users to query and automate
machine-to-machine downloads of ARM data. This tool uses a REST URL and
specific parameters (saveData, query), user ID and access token, a
datastream name, a start date, and an end date, and data files matching
the criteria will be returned to the user and downloaded.
By using this web service, users can set up cron jobs and automatically
download data from /data/archive into their workspace. This will also
eliminate the manual step of following a link in an email to download data.
All other data files, which are not on the spinning
disk (on HPSS), will have to go through the regular ordering process.
More information about this REST API and tools can be found on `ARM Live
<https://adc.arm.gov/armlive/#scripts>`_.
To login/register for an access token click `here
<https://adc.arm.gov/armlive/livedata/home>`_.
Author: Michael Giansiracusa
Email: [email protected]
Web Tools Contact: Ranjeet Devarakonda [email protected]
Examples
--------
This code will download the netCDF files from the sgpmetE13.b1 datastream
and place them in a directory named sgpmetE13.b1. The data from 14 Jan to
20 Jan 2017 will be downloaded. Replace *userName* and *XXXXXXXXXXXXXXXX*
with your username and token for ARM Data Discovery. See the Notes for
information on how to obtain a username and token.
.. code-block:: python
act.discovery.download_data(
"userName", "XXXXXXXXXXXXXXXX", "sgpmetE13.b1", "2017-01-14", "2017-01-20"
)
"""
# default start and end are empty
start, end = '', ''
# start and end strings for query_url are constructed
# if the arguments were provided
if startdate:
start_datetime = date_parser(startdate, return_datetime=True)
start = start_datetime.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'
start = f'&start={start}'
if enddate:
end_datetime = date_parser(enddate, return_datetime=True)
# If the start and end date are the same, add a day to the end date
if start_datetime == end_datetime:
end_datetime += timedelta(days=1)
end = end_datetime.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'
end = f'&end={end}'
# build the url to query the web service using the arguments provided
query_url = (
'https://adc.arm.gov/armlive/livedata/query?' + 'user={0}&ds={1}{2}{3}&wt=json'
).format(':'.join([username, token]), datastream, start, end)
# get url response, read the body of the message,
# and decode from bytes type to utf-8 string
response_body = urlopen(query_url).read().decode('utf-8')
# if the response is an html doc, then there was an error with the user
if response_body[1:14] == '!DOCTYPE html':
raise ConnectionRefusedError('Error with user. Check username or token.')
# parse into json object
response_body_json = json.loads(response_body)
# construct output directory
if output:
# output files to directory specified
output_dir = os.path.join(output)
else:
# if no folder given, add datastream folder
# to current working dir to prevent file mix-up
output_dir = os.path.join(os.getcwd(), datastream)
# not testing, response is successful and files were returned
if response_body_json is None:
print('ARM Data Live Webservice does not appear to be functioning')
return []
num_files = len(response_body_json['files'])
file_names = []
if response_body_json['status'] == 'success' and num_files > 0:
for fname in response_body_json['files']:
if time is not None:
if time not in fname:
continue
print(f'[DOWNLOADING] {fname}')
# construct link to web service saveData function
save_data_url = (
'https://adc.arm.gov/armlive/livedata/' + 'saveData?user={0}&file={1}'
).format(':'.join([username, token]), fname)
output_file = os.path.join(output_dir, fname)
# make directory if it doesn't exist
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
# create file and write bytes to file
with open(output_file, 'wb') as open_bytes_file:
open_bytes_file.write(urlopen(save_data_url).read())
file_names.append(output_file)
else:
print(
'No files returned or url status error.\n' 'Check datastream name, start, and end date.'
)
return file_names
<|code_end|>
|
PSL Wind Profiler Reader Hardcoded to CTD site
Line 38 in io.noaapsl.read_psl_wind_profiler is hardcoded to the CTD site. We should automatically read the site code from the file and parse based on that, or update the function to accept a site name.
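A minimal sketch of the suggested approach, assuming the station identifier is always the first entry of the file and repeats at the start of every data table; the helper name `locate_tables` is illustrative and not part of ACT:
```
import numpy as np
import pandas as pd


def locate_tables(filename):
    """Find the start row of each data table without hardcoding 'CTD'."""
    df = pd.read_csv(filename, header=None)
    # Read the station identifier from the first entry (e.g. ' CTD') instead of assuming it
    site_code = df[0][0]
    # Every row matching the identifier marks the beginning of a new table
    idx = np.where(df[0] == site_code)[0]
    return site_code.strip(), idx
```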
|
act/io/noaapsl.py
<|code_start|>"""
Modules for reading in NOAA PSL data.
"""
from datetime import datetime
from itertools import groupby
import fsspec
import numpy as np
import pandas as pd
import xarray as xr
def read_psl_wind_profiler(filename, transpose=True):
"""
Returns `xarray.Dataset` with stored data and metadata from a user-defined
NOAA PSL wind profiler file.
Parameters
----------
filename : str
Name of file(s) to read.
transpose : bool
True to transpose the data.
Return
------
obj_low : Xarray.dataset
Standard Xarray dataset with the data for low mode
obj_high : Xarray.dataset
Standard Xarray dataset with the data for high mode.
"""
# read file with pandas for preparation.
df = pd.read_csv(filename, header=None)
# Get location of where each table begins
index_list = df[0] == ' CTD'
idx = np.where(index_list)
# Get header of each column of data.
column_list = list(df.loc[9][0].split())
beam_vars = ['RAD', 'CNT', 'SNR', 'QC']
for i, c in enumerate(column_list):
if c in beam_vars:
if column_list.count(c) > 2:
column_list[i] = c + '1'
elif column_list.count(c) > 1:
column_list[i] = c + '2'
elif column_list.count(c) > 0:
column_list[i] = c + '3'
# Loop through column data only which appears after 10 lines of metadata.
# Year, Month, day, hour, minute, second, utc offset
low = []
hi = []
for i in range(idx[0].shape[0] - 1):
# index each table by using the idx of when CTD appears.
# str split is use as 2 spaces are added to each data point,
# convert to float.
date_str = df.iloc[idx[0][i] + 3]
date_str = list(filter(None, date_str[0].split(' ')))
date_str = list(map(int, date_str))
# Datetime not taking into account the utc offset yet
time = datetime(
2000 + date_str[0],
date_str[1],
date_str[2],
date_str[3],
date_str[4],
date_str[5],
)
mode = df.iloc[idx[0][i] + 7][0]
mode = int(mode.split(' ')[-1])
df_array = np.array(
df.iloc[idx[0][i] + 10 : idx[0][i + 1] - 1][0].str.split(r'\s{2,}').tolist(),
dtype='float',
)
df_add = pd.DataFrame(df_array, columns=column_list)
df_add = df_add.replace(999999.0, np.nan)
xr_add = df_add.to_xarray()
xr_add = xr_add.swap_dims({'index': 'height'})
xr_add = xr_add.reset_coords('index')
xr_add = xr_add.assign_coords({'time': np.array(time), 'height': xr_add['HT'].values})
if mode < 1000.0:
low.append(xr_add)
else:
hi.append(xr_add)
obj_low = xr.concat(low, 'time')
obj_hi = xr.concat(hi, 'time')
# Adding site information line 1
site_loc = df.iloc[idx[0][0]]
site_list = site_loc.str.split(r'\s{2}').tolist()
site = site_list[0][0].strip()
obj_low.attrs['site_identifier'] = site
obj_hi.attrs['site_identifier'] = site
# Adding data type and revision number line 2.
rev = df.loc[idx[0][0] + 1]
rev_list = rev.str.split(r'\s{3}').tolist()
rev_array = np.array(rev_list[0])
obj_low.attrs['data_type'] = rev_array[0].strip()
obj_hi.attrs['data_type'] = rev_array[0].strip()
obj_low.attrs['revision_number'] = rev_array[1].strip()
obj_hi.attrs['revision_number'] = rev_array[1].strip()
# Adding coordinate attributes line 3.
coords = df.loc[idx[0][0] + 2]
coords_list = coords.str.split(r'\s{2,}').tolist()
coords_list[0].remove('')
coords_array = np.array(coords_list[0], dtype='float32')
obj_low.attrs['latitude'] = np.array([coords_array[0]])
obj_hi.attrs['latitude'] = np.array([coords_array[0]])
obj_low.attrs['longitude'] = np.array([coords_array[1]])
obj_hi.attrs['longitude'] = np.array([coords_array[1]])
obj_low.attrs['altitude'] = np.array([coords_array[2]])
obj_hi.attrs['altitude'] = np.array([coords_array[2]])
# Adding azimuth and elevation line 9
az_el = df.loc[idx[0][0] + 8]
az_el_list = az_el.str.split(r'\s{2,}').tolist()
az_el_list[0].remove('')
az_el_array = np.array(az_el_list[0])
az = []
el = []
for i in az_el_array:
sep = i.split()
az.append(sep[0])
el.append(sep[1])
az_array = np.array(az, dtype='float32')
el_array = np.array(el, dtype='float32')
obj_low.attrs['azimuth'] = az_array
obj_hi.attrs['azimuth'] = az_array
obj_low.attrs['elevation'] = el_array
obj_hi.attrs['elevation'] = el_array
if transpose:
obj_low = obj_low.transpose()
obj_hi = obj_hi.transpose()
return obj_low, obj_hi
def read_psl_wind_profiler_temperature(filepath):
"""
Returns `xarray.Dataset` with stored data and metadata from a user-defined
NOAA PSL wind profiler temperature file.
Parameters
----------
filename : str
Name of file(s) to read.
Return
------
ds : Xarray.dataset
Standard Xarray dataset with the data
"""
# Open the file, read in the lines as a list, and return that list
file = fsspec.open(filepath).open()
lines = file.readlines()
lines = [x.decode().rstrip()[:] for x in lines][1:]
# Separate sections based on the $ separator in the file
sections_of_file = (list(g) for _, g in groupby(lines, key='$'.__ne__))
# Count how many lines need to be skipped when reading into pandas
start_line = 0
list_of_datasets = []
for section in sections_of_file:
if section[0] != '$':
list_of_datasets.append(
_parse_psl_temperature_lines(filepath, section, line_offset=start_line)
)
start_line += len(section)
# Merge the resultant datasets together
return xr.concat(list_of_datasets, dim='time').transpose('HT', 'time')
def _parse_psl_temperature_lines(filepath, lines, line_offset=0):
"""
Reads lines related to temperature in a psl file
Parameters
----------
filename : str
Name of file(s) to read.
lines = list
List of strings containing the lines to parse
line_offset = int (default = 0)
Offset to start reading the pandas data table
Returns
-------
ds = xr.Dataset
Xarray dataset with temperature data
"""
# 1 - site
site = lines[0]
# 2 - datetype
datatype, _, version = filter_list(lines[1].split(' '))
# 3 - station lat, lon, elevation
latitude, longitude, elevation = filter_list(lines[2].split(' ')).astype(float)
# 4 - year, month, day, hour, minute, second, utc
time = parse_date_line(lines[3])
# 5 - Consensus averaging time, number of beams, number of range gates
consensus_average_time, number_of_beams, number_of_range_gates = filter_list(
lines[4].split(' ')
).astype(int)
# 7 - number of coherent integrations, number of spectral averages, pulse width, inner pulse period
(
number_coherent_integrations,
number_spectral_averages,
pulse_width,
inner_pulse_period,
) = filter_list(lines[6].split(' ')).astype(int)
# 8 - full-scale doppler value, delay to first gate, number of gates, spacing of gates
full_scale_doppler, delay_first_gate, number_of_gates, spacing_of_gates = filter_list(
lines[7].split(' ')
).astype(float)
# 9 - beam azimuth (degrees clockwise from north)
beam_azimuth, beam_elevation = filter_list(lines[8].split(' ')).astype(float)
# Read in the data table section using pandas
df = pd.read_csv(filepath, skiprows=line_offset + 10, delim_whitespace=True)
# Only read in the number of rows for a given set of gates
df = df.iloc[: int(number_of_gates)]
# Grab a list of valid columns, except time
columns = set(list(df.columns)) - {'time'}
# Set the data types to be floats
df = df[list(columns)].astype(float)
# Nan values are encoded as 999999 - let's reflect that
df = df.replace(999999.0, np.nan)
# Ensure the height array is stored as a float
df['HT'] = df.HT.astype(float)
# Set the height as an index
df = df.set_index('HT')
# Rename the count and snr columns more usefully
df = df.rename(
columns={
'CNT': 'CNT_T',
'CNT.1': 'CNT_Tc',
'CNT.2': 'CNT_W',
'SNR': 'SNR_T',
'SNR.1': 'SNR_Tc',
'SNR.2': 'SNR_W',
}
)
# Convert to an xarray dataset
ds = df.to_xarray()
# Add attributes to variables
# Height
ds['HT'].attrs['long_name'] = 'height_above_ground'
ds['HT'].attrs['units'] = 'km'
# Temperature
ds['T'].attrs['long_name'] = 'average_uncorrected_RASS_temperature'
ds['T'].attrs['units'] = 'degC'
ds['Tc'].attrs['long_name'] = 'average_corrected_RASS_temperature'
ds['Tc'].attrs['units'] = 'degC'
# Vertical motion (w)
ds['W'].attrs['long_name'] = 'average_vertical_wind'
ds['W'].attrs['units'] = 'm/s'
# Add time to our dataset
ds['time'] = time
# Add in our additional attributes
ds.attrs['site_identifier'] = site
ds.attrs['latitude'] = latitude
ds.attrs['longitude'] = longitude
ds.attrs['elevation'] = elevation
ds.attrs['beam_azimuth'] = beam_azimuth
ds.attrs['revision_number'] = version
ds.attrs[
'data_description'
] = 'https://psl.noaa.gov/data/obs/data/view_data_type_info.php?SiteID=ctd&DataOperationalID=5855&OperationalID=2371'
ds.attrs['consensus_average_time'] = consensus_average_time
ds.attrs['number_of_beams'] = int(number_of_beams)
ds.attrs['number_of_gates'] = int(number_of_gates)
ds.attrs['number_of_range_gates'] = int(number_of_range_gates)
ds.attrs['number_spectral_averages'] = int(number_spectral_averages)
ds.attrs['pulse_width'] = pulse_width
ds.attrs['inner_pulse_period'] = inner_pulse_period
ds.attrs['full_scale_doppler_value'] = full_scale_doppler
ds.attrs['spacing_of_gates'] = spacing_of_gates
return ds
def filter_list(list_of_strings):
"""
Parses a list of strings, remove empty strings, and return a numpy array
"""
return np.array(list(filter(None, list_of_strings)))
def parse_date_line(list_of_strings):
"""
Parses the date line in PSL files
"""
year, month, day, hour, minute, second, utc_offset = filter_list(
list_of_strings.split(' ')
).astype(int)
year += 2000
return datetime(year, month, day, hour, minute, second)
def read_psl_parsivel(files):
"""
Returns `xarray.Dataset` with stored data and metadata from a user-defined
NOAA PSL parsivel
Parameters
----------
files : str or list
Name of file(s) or urls to read.
Return
------
obj : Xarray.dataset
Standard Xarray dataset with the data for the parsivel
"""
# Define the names for the variables
names = ['time', 'B1', 'B2', 'B3', 'B4', 'B5', 'B6', 'B7', 'B8', 'B9', 'B10', 'B11', 'B12',
'B13', 'B14', 'B15', 'B16', 'B17', 'B18', 'B19', 'B20', 'B21', 'B22', 'B23', 'B24',
'B25', 'B26', 'B27', 'B28', 'B29', 'B30', 'B31', 'B32', 'blackout', 'good', 'bad',
'number_detected_particles', 'precip_rate', 'precip_amount', 'precip_accumulation',
'equivalent_radar_reflectivity', 'number_in_error', 'dirty', 'very_dirty', 'damaged',
'laserband_amplitude', 'laserband_amplitude_stdev', 'sensor_temperature', 'sensor_temperature_stdev',
'sensor_voltage', 'sensor_voltage_stdev', 'heating_current', 'heating_current_stdev', 'number_rain_particles',
'number_non_rain_particles', 'number_ambiguous_particles', 'precip_type']
# Define the particle sizes and class width sizes based on
# https://psl.noaa.gov/data/obs/data/view_data_type_info.php?SiteID=ctd&DataOperationalID=5890
vol_equiv_diam = [0.062, 0.187, 0.312, 0.437, 0.562, 0.687, 0.812, 0.937, 1.062, 1.187, 1.375,
1.625, 1.875, 2.125, 2.375, 2.75, 3.25, 3.75, 4.25, 4.75, 5.5, 6.5, 7.5, 8.5,
9.5, 11.0, 13.0, 15.0, 17.0, 19.0, 21.5, 24.5]
class_size_width = [0.125, 0.125, 0.125, 0.125, 0.125, 0.125, 0.125, 0.125, 0.125, 0.125,
0.250, 0.250, 0.250, 0.250, 0.250, 0.5, 0.5, 0.5, 0.5, 0.5,
1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 2.0, 2.0, 2.0, 2.0, 3.0, 3.0]
if not isinstance(files, list):
files = [files]
# Loop through each file or url and append the dataframe into data for concatenations
data = []
end_time = []
for f in files:
df = pd.read_table(f, skiprows=[0, 1, 2], names=names, index_col=0, sep='\s+')
# Reading the table twice to get the date so it can be parsed appropriately
date = pd.read_table(f, nrows=0).to_string().split(' ')[-3]
time = df.index
start_time = []
form = '%y%j%H:%M:%S:%f'
for t in time:
start_time.append(pd.to_datetime(date + ':' + t.split('-')[0], format=form))
end_time.append(pd.to_datetime(date + ':' + t.split('-')[1], format=form))
df.index = start_time
data.append(df)
df = pd.concat(data)
# Create a 2D size distribution variable from all the B* variables
dsd = []
for n in names:
if 'B' not in n:
continue
dsd.append(list(df[n]))
# Convert the dataframe to xarray DataSet and add variables
obj = df.to_xarray()
obj = obj.rename({'index': 'time'})
long_name = 'Drop Size Distribution'
attrs = {'long_name': long_name, 'units': 'count'}
da = xr.DataArray(np.transpose(dsd), dims=['time', 'particle_size'], coords=[obj['time'].values, vol_equiv_diam])
obj['number_density_drops'] = da
attrs = {'long_name': 'Particle class size average', 'units': 'mm'}
da = xr.DataArray(class_size_width, dims=['particle_size'], coords=[vol_equiv_diam], attrs=attrs)
obj['class_size_width'] = da
attrs = {'long_name': 'Class size width', 'units': 'mm'}
da = xr.DataArray(vol_equiv_diam, dims=['particle_size'], coords=[vol_equiv_diam], attrs=attrs)
obj['particle_size'] = da
attrs = {'long_name': 'End time of averaging interval'}
da = xr.DataArray(end_time, dims=['time'], coords=[obj['time'].values], attrs=attrs)
obj['interval_end_time'] = da
# Define the attributes and metadata and add into the DataSet
attrs = {'blackout': {'long_name': 'Number of samples excluded during PC clock sync', 'units': 'count'},
'good': {'long_name': 'Number of samples that passed QC checks', 'units': 'count'},
'bad': {'long_name': 'Number of samples that failed QC checks', 'units': 'count'},
'number_detected_particles': {'long_name': 'Total number of detected particles', 'units': 'count'},
'precip_rate': {'long_name': 'Precipitation rate', 'units': 'mm/hr'},
'precip_amount': {'long_name': 'Interval accumulation', 'units': 'mm'},
'precip_accumulation': {'long_name': 'Event accumulation', 'units': 'mm'},
'equivalent_radar_reflectivity': {'long_name': 'Radar Reflectivity', 'units': 'dB'},
'number_in_error': {'long_name': 'Number of samples that were reported dirt, very dirty, or damaged', 'units': 'count'},
'dirty': {'long_name': 'Laser glass is dirty but measurement is still possible', 'units': 'unitless'},
'very_dirty': {'long_name': 'Laser glass is dirty, partially covered no further measurements are possible', 'units': 'unitless'},
'damaged': {'long_name': 'Laser damaged', 'units': 'unitless'},
'laserband_amplitude': {'long_name': 'Average signal amplitude of the laser strip', 'units': 'unitless'},
'laserband_amplitude_stdev': {'long_name': 'Standard deviation of the signal amplitude of the laser strip', 'units': 'unitless'},
'sensor_temperature': {'long_name': 'Average sensor temperature', 'units': 'degC'},
'sensor_temperature_stdev': {'long_name': 'Standard deviation of sensor temperature', 'units': 'degC'},
'sensor_voltage': {'long_name': 'Sensor power supply voltage', 'units': 'V'},
'sensor_voltage_stdev': {'long_name': 'Standard deviation of the sensor power supply voltage', 'units': 'V'},
'heating_current': {'long_name': 'Average heating system current', 'units': 'A'},
'heating_current_stdev': {'long_name': 'Standard deviation of heating system current', 'units': 'A'},
'number_rain_particles': {'long_name': 'Number of particles detected as rain', 'units': 'unitless'},
'number_non_rain_particles': {'long_name': 'Number of particles detected not as rain', 'units': 'unitless'},
'number_ambiguous_particles': {'long_name': 'Number of particles detected as ambiguous', 'units': 'unitless'},
'precip_type': {'long_name': 'Precipitation type (1=rain; 2=mixed; 3=snow)', 'units': 'unitless'},
'number_density_drops': {'long_name': 'Drop Size Distribution', 'units': 'count'}}
for v in obj:
if v in attrs:
obj[v].attrs = attrs[v]
return obj
<|code_end|>
|
act/io/noaapsl.py
<|code_start|>"""
Modules for reading in NOAA PSL data.
"""
from datetime import datetime
from itertools import groupby
import fsspec
import numpy as np
import pandas as pd
import xarray as xr
def read_psl_wind_profiler(filename, transpose=True):
"""
Returns `xarray.Dataset` with stored data and metadata from a user-defined
NOAA PSL wind profiler file.
Parameters
----------
filename : str
Name of file(s) to read.
transpose : bool
True to transpose the data.
Return
------
obj_low : Xarray.dataset
Standard Xarray dataset with the data for low mode
obj_high : Xarray.dataset
Standard Xarray dataset with the data for high mode.
"""
# read file with pandas for preparation.
df = pd.read_csv(filename, header=None)
# The first entry should be the station identifier (ex. CTD)
potential_site = df[0][0]
# Get location of where each table begins
index_list = df[0] == potential_site
idx = np.where(index_list)
# Get header of each column of data.
column_list = list(df.loc[9][0].split())
beam_vars = ['RAD', 'CNT', 'SNR', 'QC']
for i, c in enumerate(column_list):
if c in beam_vars:
if column_list.count(c) > 2:
column_list[i] = c + '1'
elif column_list.count(c) > 1:
column_list[i] = c + '2'
elif column_list.count(c) > 0:
column_list[i] = c + '3'
# Loop through column data only which appears after 10 lines of metadata.
# Year, Month, day, hour, minute, second, utc offset
low = []
hi = []
for i in range(idx[0].shape[0] - 1):
# index each table by using the idx of when the site identifier appears.
# str split is use as 2 spaces are added to each data point,
# convert to float.
date_str = df.iloc[idx[0][i] + 3]
date_str = list(filter(None, date_str[0].split(' ')))
date_str = list(map(int, date_str))
# Datetime not taking into account the utc offset yet
time = datetime(
2000 + date_str[0],
date_str[1],
date_str[2],
date_str[3],
date_str[4],
date_str[5],
)
mode = df.iloc[idx[0][i] + 7][0]
mode = int(mode.split(' ')[-1])
df_array = np.array(
df.iloc[idx[0][i] + 10 : idx[0][i + 1] - 1][0].str.split(r'\s{2,}').tolist(),
dtype='float',
)
df_add = pd.DataFrame(df_array, columns=column_list)
df_add = df_add.replace(999999.0, np.nan)
xr_add = df_add.to_xarray()
xr_add = xr_add.swap_dims({'index': 'height'})
xr_add = xr_add.reset_coords('index')
xr_add = xr_add.assign_coords({'time': np.array(time), 'height': xr_add['HT'].values})
if mode < 1000.0:
low.append(xr_add)
else:
hi.append(xr_add)
obj_low = xr.concat(low, 'time')
obj_hi = xr.concat(hi, 'time')
# Adding site information line 1
site_loc = df.iloc[idx[0][0]]
site_list = site_loc.str.split(r'\s{2}').tolist()
site = site_list[0][0].strip()
obj_low.attrs['site_identifier'] = site
obj_hi.attrs['site_identifier'] = site
# Adding data type and revision number line 2.
rev = df.loc[idx[0][0] + 1]
rev_list = rev.str.split(r'\s{3}').tolist()
rev_array = np.array(rev_list[0])
obj_low.attrs['data_type'] = rev_array[0].strip()
obj_hi.attrs['data_type'] = rev_array[0].strip()
obj_low.attrs['revision_number'] = rev_array[1].strip()
obj_hi.attrs['revision_number'] = rev_array[1].strip()
# Adding coordinate attributes line 3.
coords = df.loc[idx[0][0] + 2]
coords_list = coords.str.split(r'\s{2,}').tolist()
coords_list[0].remove('')
coords_array = np.array(coords_list[0], dtype='float32')
obj_low.attrs['latitude'] = np.array([coords_array[0]])
obj_hi.attrs['latitude'] = np.array([coords_array[0]])
obj_low.attrs['longitude'] = np.array([coords_array[1]])
obj_hi.attrs['longitude'] = np.array([coords_array[1]])
obj_low.attrs['altitude'] = np.array([coords_array[2]])
obj_hi.attrs['altitude'] = np.array([coords_array[2]])
# Adding azimuth and elevation line 9
az_el = df.loc[idx[0][0] + 8]
az_el_list = az_el.str.split(r'\s{2,}').tolist()
az_el_list[0].remove('')
az_el_array = np.array(az_el_list[0])
az = []
el = []
for i in az_el_array:
sep = i.split()
az.append(sep[0])
el.append(sep[1])
az_array = np.array(az, dtype='float32')
el_array = np.array(el, dtype='float32')
obj_low.attrs['azimuth'] = az_array
obj_hi.attrs['azimuth'] = az_array
obj_low.attrs['elevation'] = el_array
obj_hi.attrs['elevation'] = el_array
if transpose:
obj_low = obj_low.transpose()
obj_hi = obj_hi.transpose()
return obj_low, obj_hi
def read_psl_wind_profiler_temperature(filepath):
"""
Returns `xarray.Dataset` with stored data and metadata from a user-defined
NOAA PSL wind profiler temperature file.
Parameters
----------
filename : str
Name of file(s) to read.
Return
------
ds : Xarray.dataset
Standard Xarray dataset with the data
"""
# Open the file, read in the lines as a list, and return that list
file = fsspec.open(filepath).open()
lines = file.readlines()
lines = [x.decode().rstrip()[:] for x in lines][1:]
# Separate sections based on the $ separator in the file
sections_of_file = (list(g) for _, g in groupby(lines, key='$'.__ne__))
# Count how many lines need to be skipped when reading into pandas
start_line = 0
list_of_datasets = []
for section in sections_of_file:
if section[0] != '$':
list_of_datasets.append(
_parse_psl_temperature_lines(filepath, section, line_offset=start_line)
)
start_line += len(section)
# Merge the resultant datasets together
return xr.concat(list_of_datasets, dim='time').transpose('HT', 'time')
def _parse_psl_temperature_lines(filepath, lines, line_offset=0):
"""
Reads lines related to temperature in a psl file
Parameters
----------
filename : str
Name of file(s) to read.
lines = list
List of strings containing the lines to parse
line_offset = int (default = 0)
Offset to start reading the pandas data table
Returns
-------
ds = xr.Dataset
Xarray dataset with temperature data
"""
# 1 - site
site = lines[0]
# 2 - datetype
datatype, _, version = filter_list(lines[1].split(' '))
# 3 - station lat, lon, elevation
latitude, longitude, elevation = filter_list(lines[2].split(' ')).astype(float)
# 4 - year, month, day, hour, minute, second, utc
time = parse_date_line(lines[3])
# 5 - Consensus averaging time, number of beams, number of range gates
consensus_average_time, number_of_beams, number_of_range_gates = filter_list(
lines[4].split(' ')
).astype(int)
# 7 - number of coherent integrations, number of spectral averages, pulse width, inner pulse period
(
number_coherent_integrations,
number_spectral_averages,
pulse_width,
inner_pulse_period,
) = filter_list(lines[6].split(' ')).astype(int)
# 8 - full-scale doppler value, delay to first gate, number of gates, spacing of gates
full_scale_doppler, delay_first_gate, number_of_gates, spacing_of_gates = filter_list(
lines[7].split(' ')
).astype(float)
# 9 - beam azimuth (degrees clockwise from north)
beam_azimuth, beam_elevation = filter_list(lines[8].split(' ')).astype(float)
# Read in the data table section using pandas
df = pd.read_csv(filepath, skiprows=line_offset + 10, delim_whitespace=True)
# Only read in the number of rows for a given set of gates
df = df.iloc[: int(number_of_gates)]
# Grab a list of valid columns, except time
columns = set(list(df.columns)) - {'time'}
# Set the data types to be floats
df = df[list(columns)].astype(float)
# Nan values are encoded as 999999 - let's reflect that
df = df.replace(999999.0, np.nan)
# Ensure the height array is stored as a float
df['HT'] = df.HT.astype(float)
# Set the height as an index
df = df.set_index('HT')
# Rename the count and snr columns more usefully
df = df.rename(
columns={
'CNT': 'CNT_T',
'CNT.1': 'CNT_Tc',
'CNT.2': 'CNT_W',
'SNR': 'SNR_T',
'SNR.1': 'SNR_Tc',
'SNR.2': 'SNR_W',
}
)
# Convert to an xarray dataset
ds = df.to_xarray()
# Add attributes to variables
# Height
ds['HT'].attrs['long_name'] = 'height_above_ground'
ds['HT'].attrs['units'] = 'km'
# Temperature
ds['T'].attrs['long_name'] = 'average_uncorrected_RASS_temperature'
ds['T'].attrs['units'] = 'degC'
ds['Tc'].attrs['long_name'] = 'average_corrected_RASS_temperature'
ds['Tc'].attrs['units'] = 'degC'
# Vertical motion (w)
ds['W'].attrs['long_name'] = 'average_vertical_wind'
ds['W'].attrs['units'] = 'm/s'
# Add time to our dataset
ds['time'] = time
# Add in our additional attributes
ds.attrs['site_identifier'] = site
ds.attrs['latitude'] = latitude
ds.attrs['longitude'] = longitude
ds.attrs['elevation'] = elevation
ds.attrs['beam_azimuth'] = beam_azimuth
ds.attrs['revision_number'] = version
ds.attrs[
'data_description'
] = 'https://psl.noaa.gov/data/obs/data/view_data_type_info.php?SiteID=ctd&DataOperationalID=5855&OperationalID=2371'
ds.attrs['consensus_average_time'] = consensus_average_time
ds.attrs['number_of_beams'] = int(number_of_beams)
ds.attrs['number_of_gates'] = int(number_of_gates)
ds.attrs['number_of_range_gates'] = int(number_of_range_gates)
ds.attrs['number_spectral_averages'] = int(number_spectral_averages)
ds.attrs['pulse_width'] = pulse_width
ds.attrs['inner_pulse_period'] = inner_pulse_period
ds.attrs['full_scale_doppler_value'] = full_scale_doppler
ds.attrs['spacing_of_gates'] = spacing_of_gates
return ds
def filter_list(list_of_strings):
"""
Parses a list of strings, remove empty strings, and return a numpy array
"""
return np.array(list(filter(None, list_of_strings)))
def parse_date_line(list_of_strings):
"""
Parses the date line in PSL files
"""
year, month, day, hour, minute, second, utc_offset = filter_list(
list_of_strings.split(' ')
).astype(int)
year += 2000
return datetime(year, month, day, hour, minute, second)
def read_psl_parsivel(files):
"""
Returns `xarray.Dataset` with stored data and metadata from a user-defined
NOAA PSL parsivel
Parameters
----------
files : str or list
Name of file(s) or urls to read.
Return
------
obj : Xarray.dataset
Standard Xarray dataset with the data for the parsivel
"""
# Define the names for the variables
names = ['time', 'B1', 'B2', 'B3', 'B4', 'B5', 'B6', 'B7', 'B8', 'B9', 'B10', 'B11', 'B12',
'B13', 'B14', 'B15', 'B16', 'B17', 'B18', 'B19', 'B20', 'B21', 'B22', 'B23', 'B24',
'B25', 'B26', 'B27', 'B28', 'B29', 'B30', 'B31', 'B32', 'blackout', 'good', 'bad',
'number_detected_particles', 'precip_rate', 'precip_amount', 'precip_accumulation',
'equivalent_radar_reflectivity', 'number_in_error', 'dirty', 'very_dirty', 'damaged',
'laserband_amplitude', 'laserband_amplitude_stdev', 'sensor_temperature', 'sensor_temperature_stdev',
'sensor_voltage', 'sensor_voltage_stdev', 'heating_current', 'heating_current_stdev', 'number_rain_particles',
'number_non_rain_particles', 'number_ambiguous_particles', 'precip_type']
# Define the particle sizes and class width sizes based on
# https://psl.noaa.gov/data/obs/data/view_data_type_info.php?SiteID=ctd&DataOperationalID=5890
vol_equiv_diam = [0.062, 0.187, 0.312, 0.437, 0.562, 0.687, 0.812, 0.937, 1.062, 1.187, 1.375,
1.625, 1.875, 2.125, 2.375, 2.75, 3.25, 3.75, 4.25, 4.75, 5.5, 6.5, 7.5, 8.5,
9.5, 11.0, 13.0, 15.0, 17.0, 19.0, 21.5, 24.5]
class_size_width = [0.125, 0.125, 0.125, 0.125, 0.125, 0.125, 0.125, 0.125, 0.125, 0.125,
0.250, 0.250, 0.250, 0.250, 0.250, 0.5, 0.5, 0.5, 0.5, 0.5,
1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 2.0, 2.0, 2.0, 2.0, 3.0, 3.0]
if not isinstance(files, list):
files = [files]
# Loop through each file or url and append the dataframe into data for concatenations
data = []
end_time = []
for f in files:
df = pd.read_table(f, skiprows=[0, 1, 2], names=names, index_col=0, sep='\s+')
# Reading the table twice to get the date so it can be parsed appropriately
date = pd.read_table(f, nrows=0).to_string().split(' ')[-3]
time = df.index
start_time = []
form = '%y%j%H:%M:%S:%f'
for t in time:
start_time.append(pd.to_datetime(date + ':' + t.split('-')[0], format=form))
end_time.append(pd.to_datetime(date + ':' + t.split('-')[1], format=form))
df.index = start_time
data.append(df)
df = pd.concat(data)
# Create a 2D size distribution variable from all the B* variables
dsd = []
for n in names:
if 'B' not in n:
continue
dsd.append(list(df[n]))
# Convert the dataframe to xarray DataSet and add variables
obj = df.to_xarray()
obj = obj.rename({'index': 'time'})
long_name = 'Drop Size Distribution'
attrs = {'long_name': long_name, 'units': 'count'}
da = xr.DataArray(np.transpose(dsd), dims=['time', 'particle_size'], coords=[obj['time'].values, vol_equiv_diam])
obj['number_density_drops'] = da
attrs = {'long_name': 'Particle class size average', 'units': 'mm'}
da = xr.DataArray(class_size_width, dims=['particle_size'], coords=[vol_equiv_diam], attrs=attrs)
obj['class_size_width'] = da
attrs = {'long_name': 'Class size width', 'units': 'mm'}
da = xr.DataArray(vol_equiv_diam, dims=['particle_size'], coords=[vol_equiv_diam], attrs=attrs)
obj['particle_size'] = da
attrs = {'long_name': 'End time of averaging interval'}
da = xr.DataArray(end_time, dims=['time'], coords=[obj['time'].values], attrs=attrs)
obj['interval_end_time'] = da
# Define the attributes and metadata and add into the DataSet
attrs = {'blackout': {'long_name': 'Number of samples excluded during PC clock sync', 'units': 'count'},
'good': {'long_name': 'Number of samples that passed QC checks', 'units': 'count'},
'bad': {'long_name': 'Number of samples that failed QC checks', 'units': 'count'},
'number_detected_particles': {'long_name': 'Total number of detected particles', 'units': 'count'},
'precip_rate': {'long_name': 'Precipitation rate', 'units': 'mm/hr'},
'precip_amount': {'long_name': 'Interval accumulation', 'units': 'mm'},
'precip_accumulation': {'long_name': 'Event accumulation', 'units': 'mm'},
'equivalent_radar_reflectivity': {'long_name': 'Radar Reflectivity', 'units': 'dB'},
'number_in_error': {'long_name': 'Number of samples that were reported dirt, very dirty, or damaged', 'units': 'count'},
'dirty': {'long_name': 'Laser glass is dirty but measurement is still possible', 'units': 'unitless'},
'very_dirty': {'long_name': 'Laser glass is dirty, partially covered no further measurements are possible', 'units': 'unitless'},
'damaged': {'long_name': 'Laser damaged', 'units': 'unitless'},
'laserband_amplitude': {'long_name': 'Average signal amplitude of the laser strip', 'units': 'unitless'},
'laserband_amplitude_stdev': {'long_name': 'Standard deviation of the signal amplitude of the laser strip', 'units': 'unitless'},
'sensor_temperature': {'long_name': 'Average sensor temperature', 'units': 'degC'},
'sensor_temperature_stdev': {'long_name': 'Standard deviation of sensor temperature', 'units': 'degC'},
'sensor_voltage': {'long_name': 'Sensor power supply voltage', 'units': 'V'},
'sensor_voltage_stdev': {'long_name': 'Standard deviation of the sensor power supply voltage', 'units': 'V'},
'heating_current': {'long_name': 'Average heating system current', 'units': 'A'},
'heating_current_stdev': {'long_name': 'Standard deviation of heating system current', 'units': 'A'},
'number_rain_particles': {'long_name': 'Number of particles detected as rain', 'units': 'unitless'},
'number_non_rain_particles': {'long_name': 'Number of particles detected not as rain', 'units': 'unitless'},
'number_ambiguous_particles': {'long_name': 'Number of particles detected as ambiguous', 'units': 'unitless'},
'precip_type': {'long_name': 'Precipitation type (1=rain; 2=mixed; 3=snow)', 'units': 'unitless'},
'number_density_drops': {'long_name': 'Drop Size Distribution', 'units': 'count'}}
for v in obj:
if v in attrs:
obj[v].attrs = attrs[v]
return obj
<|code_end|>
|
Subpanel plots with an index greater than 2 seem to fail.
When plotting a subplot grid of size (2, 3) I receive this error:
```
File ~\dev\ACT\act\plotting\timeseriesdisplay.py:608, in TimeSeriesDisplay.plot(self, field, dsname, subplot_index, cmap, set_title, add_nan, day_night_background, invert_y_axis, abs_limits, time_rng, y_rng, use_var_for_y, set_shading, assessment_overplot, overplot_marker, overplot_behind, overplot_markersize, assessment_overplot_category, assessment_overplot_category_color, force_line_plot, labels, cbar_label, cbar_h_adjust, secondary_y, y_axis_flag_meanings, colorbar_labels, **kwargs)
605 else:
606 self.time_rng = [xdata.min().values, xdata.max().values]
--> 608 self.set_xrng(self.time_rng, subplot_index)
610 # Set Y Limit
611 if y_rng is not None:
File ~\dev\ACT\act\plotting\timeseriesdisplay.py:224, in TimeSeriesDisplay.set_xrng(self, xrng, subplot_index)
222 warnings.filterwarnings('ignore', category=UserWarning)
223 self.axes[subplot_index].set_xlim(xrng)
--> 224 self.xrng[subplot_index, 0] = np.array(xrng[0], dtype='datetime64[D]')
225 self.xrng[subplot_index, 1] = np.array(xrng[1], dtype='datetime64[D]')
IndexError: index 2 is out of bounds for axis 0 with size 2
```
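A minimal reproduction of the report, assuming ACT's bundled MET sample file and its `temp_mean` variable (both are assumptions used only for illustration):
```
import act
import matplotlib.pyplot as plt

# Sample file and variable name are assumptions based on ACT's example data
ds = act.io.armfiles.read_netcdf(act.tests.sample_files.EXAMPLE_MET1)

display = act.plotting.TimeSeriesDisplay(ds, subplot_shape=(2, 3), figsize=(15, 8))

# Plotting into the third column (index 2) reportedly raises
# "IndexError: index 2 is out of bounds for axis 0 with size 2" in set_xrng
display.plot('temp_mean', subplot_index=(0, 2))
plt.show()
```
From the traceback, the failure appears to come from how `set_xrng` indexes `self.xrng` when the display was created with a 2D subplot shape.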
|
act/plotting/timeseriesdisplay.py
<|code_start|>"""
Stores the class for TimeSeriesDisplay.
"""
import datetime as dt
import warnings
from copy import deepcopy
from re import search, search as re_search
import textwrap
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from matplotlib import colors as mplcolors
import matplotlib as mpl
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scipy.interpolate import NearestNDInterpolator
from ..qc.qcfilter import parse_bit
from ..utils import data_utils, datetime_utils as dt_utils
from ..utils.datetime_utils import determine_time_delta, reduce_time_ranges
from ..utils.geo_utils import get_sunrise_sunset_noon
from . import common
from .plot import Display
class TimeSeriesDisplay(Display):
"""
This subclass contains routines that are specific to plotting
time series plots from data. It is inherited from Display and therefore
contains all of Display's attributes and methods.
Examples
--------
To create a TimeSeriesDisplay with 3 rows, simply do:
.. code-block:: python
ds = act.read_netcdf(the_file)
disp = act.plotting.TimeSeriesDisplay(ds, subplot_shape=(3,), figsize=(15, 5))
The TimeSeriesDisplay constructor takes in the same keyword arguments as
plt.subplots. For more information on the plt.subplots keyword arguments,
see the `matplotlib documentation
<https://matplotlib.org/api/_as_gen/matplotlib.pyplot.subplots.html>`_.
If no subplot_shape is provided, then no figure or axis will be created
until add_subplots or plots is called.
"""
def __init__(self, obj, subplot_shape=(1,), ds_name=None, **kwargs):
super().__init__(obj, subplot_shape, ds_name, **kwargs)
def day_night_background(self, dsname=None, subplot_index=(0,)):
"""
Colorcodes the background according to sunrise/sunset.
Parameters
----------
dsname : None or str
If there is more than one datastream in the display object the
name of the datastream needs to be specified. If set to None and
there is only one datastream then ACT will use the sole datastream
in the object.
subplot_index : 1 or 2D tuple, list, or array
The index to the subplot to place the day and night background in.
"""
if dsname is None and len(self._obj.keys()) > 1:
raise ValueError(
'You must choose a datastream to derive the '
+ 'information needed for the day and night '
+ 'background when 2 or more datasets are in '
+ 'the display object.'
)
elif dsname is None:
dsname = list(self._obj.keys())[0]
# Get File Dates
try:
file_dates = self._obj[dsname].attrs['_file_dates']
except KeyError:
file_dates = []
if len(file_dates) == 0:
sdate = dt_utils.numpy_to_arm_date(self._obj[dsname].time.values[0])
edate = dt_utils.numpy_to_arm_date(self._obj[dsname].time.values[-1])
file_dates = [sdate, edate]
all_dates = dt_utils.dates_between(file_dates[0], file_dates[-1])
if self.axes is None:
raise RuntimeError('day_night_background requires the plot to ' 'be displayed.')
ax = self.axes[subplot_index]
# Find variable names for latitude and longitude
variables = list(self._obj[dsname].data_vars)
lat_name = [var for var in ['lat', 'latitude'] if var in variables]
lon_name = [var for var in ['lon', 'longitude'] if var in variables]
if len(lat_name) == 0:
lat_name = None
else:
lat_name = lat_name[0]
if len(lon_name) == 0:
lon_name = None
else:
lon_name = lon_name[0]
# Variable name does not match, look for standard_name declaration
if lat_name is None or lon_name is None:
for var in variables:
try:
if self._obj[dsname][var].attrs['standard_name'] == 'latitude':
lat_name = var
except KeyError:
pass
try:
if self._obj[dsname][var].attrs['standard_name'] == 'longitude':
lon_name = var
except KeyError:
pass
if lat_name is not None and lon_name is not None:
break
if lat_name is None or lon_name is None:
return
try:
if self._obj[dsname][lat_name].data.size > 1:
# Look for non-NaN values to use for location. If not found use first value.
lat = self._obj[dsname][lat_name].values
index = np.where(np.isfinite(lat))[0]
if index.size == 0:
index = [0]
lat = float(lat[index[0]])
# Look for non-NaN values to use for location. If not found use first value.
lon = self._obj[dsname][lon_name].values
index = np.where(np.isfinite(lon))[0]
if index.size == 0:
index = [0]
lon = float(lon[index[0]])
else:
lat = float(self._obj[dsname][lat_name].values)
lon = float(self._obj[dsname][lon_name].values)
except AttributeError:
return
if not np.isfinite(lat):
warnings.warn(f"Latitude value in dataset of '{lat}' is not finite. ", RuntimeWarning)
return
if not np.isfinite(lon):
warnings.warn(f"Longitude value in dataset of '{lon}' is not finite. ", RuntimeWarning)
return
lat_range = [-90, 90]
if not (lat_range[0] <= lat <= lat_range[1]):
warnings.warn(
f"Latitude value in dataset of '{lat}' not within acceptable "
f'range of {lat_range[0]} <= latitude <= {lat_range[1]}. ',
RuntimeWarning,
)
return
lon_range = [-180, 180]
if not (lon_range[0] <= lon <= lon_range[1]):
warnings.warn(
f"Longitude value in dataset of '{lon}' not within acceptable "
f'range of {lon_range[0]} <= longitude <= {lon_range[1]}. ',
RuntimeWarning,
)
return
# initialize the plot to a gray background for total darkness
rect = ax.patch
rect.set_facecolor('0.85')
# Get date ranges to plot
plot_dates = []
for f in all_dates:
for ii in [-1, 0, 1]:
plot_dates.append(f + dt.timedelta(days=ii))
# Get sunrise, sunset and noon times
sunrise, sunset, noon = get_sunrise_sunset_noon(lat, lon, plot_dates)
# Plot daylight
for ii in range(0, len(sunrise)):
ax.axvspan(sunrise[ii], sunset[ii], facecolor='#FFFFCC', zorder=0)
# Plot noon line
for ii in noon:
ax.axvline(x=ii, linestyle='--', color='y', zorder=1)
def set_xrng(self, xrng, subplot_index=(0,)):
"""
Sets the x range of the plot.
Parameters
----------
xrng : 2 number array
The x limits of the plot.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
"""
if self.axes is None:
raise RuntimeError('set_xrng requires the plot to be displayed.')
if not hasattr(self, 'xrng') and len(self.axes.shape) == 2:
self.xrng = np.zeros((self.axes.shape[0], self.axes.shape[1], 2), dtype='datetime64[D]')
elif not hasattr(self, 'xrng') and len(self.axes.shape) == 1:
self.xrng = np.zeros((self.axes.shape[0], 2), dtype='datetime64[D]')
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=UserWarning)
self.axes[subplot_index].set_xlim(xrng)
self.xrng[subplot_index, 0] = np.array(xrng[0], dtype='datetime64[D]')
self.xrng[subplot_index, 1] = np.array(xrng[1], dtype='datetime64[D]')
def set_yrng(self, yrng, subplot_index=(0,)):
"""
Sets the y range of the plot.
Parameters
----------
yrng : 2 number array
The y limits of the plot.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
"""
if self.axes is None:
raise RuntimeError('set_yrng requires the plot to be displayed.')
if not hasattr(self, 'yrng') and len(self.axes.shape) == 2:
self.yrng = np.zeros((self.axes.shape[0], self.axes.shape[1], 2))
elif not hasattr(self, 'yrng') and len(self.axes.shape) == 1:
self.yrng = np.zeros((self.axes.shape[0], 2))
if yrng[0] == yrng[1]:
yrng[1] = yrng[1] + 1
self.axes[subplot_index].set_ylim(yrng)
self.yrng[subplot_index, :] = yrng
def plot(
self,
field,
dsname=None,
subplot_index=(0,),
cmap=None,
set_title=None,
add_nan=False,
day_night_background=False,
invert_y_axis=False,
abs_limits=(None, None),
time_rng=None,
y_rng=None,
use_var_for_y=None,
set_shading='auto',
assessment_overplot=False,
overplot_marker='.',
overplot_behind=False,
overplot_markersize=6,
assessment_overplot_category={
'Incorrect': ['Bad', 'Incorrect'],
'Suspect': ['Indeterminate', 'Suspect'],
},
assessment_overplot_category_color={'Incorrect': 'red', 'Suspect': 'orange'},
force_line_plot=False,
labels=False,
cbar_label=None,
cbar_h_adjust=None,
secondary_y=False,
y_axis_flag_meanings=False,
colorbar_labels=None,
**kwargs,
):
"""
Makes a timeseries plot. If subplots have not been added yet, an axis
will be created assuming that there is only going to be one plot.
Parameters
----------
field : str
The name of the field to plot.
dsname : None or str
If there is more than one datastream in the display object the
name of the datastream needs to be specified. If set to None and
there is only one datastream ACT will use the sole datastream
in the object.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
cmap : matplotlib colormap
The colormap to use.
set_title : str
The title for the plot.
add_nan : bool
Set to True to fill in data gaps with NaNs.
day_night_background : bool
Set to True to fill in a color coded background.
according to the time of day.
abs_limits : tuple or list
Sets the bounds on plot limits even if data values exceed
those limits. Set to (ymin,ymax). Use None if only setting
minimum or maximum limit, i.e. (22., None).
time_rng : tuple or list
List or tuple with (min, max) values to set the x-axis range
limits.
y_rng : tuple or list
List or tuple with (min, max) values to set the y-axis range
use_var_for_y : str
Set this to the name of a data variable in the Dataset to use as
the y-axis variable instead of the default dimension. Useful for
instances where data has an index-based dimension instead of a
height-based dimension. If shapes of arrays do not match it will
automatically revert back to the original ydata.
set_shading : string
Option to set the matplotlib.pcolormesh shading parameter.
Defaults to 'auto'.
assessment_overplot : boolean
Option to overplot quality control colored symbols over plotted
data using flag_assessment categories.
overplot_marker : str
Marker to use for overplot symbol.
overplot_behind : bool
Place the overplot marker behind the data point.
overplot_markersize : float or int
Size of overplot marker. If overplot_behind or force_line_plot
are set the marker size will be double overplot_markersize so
the color is visible.
assessment_overplot_category : dict
Lookup to categorize assessments into groups. This allows using
multiple terms for the same quality control level of failure.
Also allows adding more to the defaults.
assessment_overplot_category_color : dict
Lookup to match overplot category color to assessment grouping.
force_line_plot : boolean
Option to plot 2D data as 1D line plots.
labels : boolean or list
Option to overwrite the legend labels. Must have same dimensions as
number of lines plotted.
cbar_label : str
Option to overwrite default colorbar label.
cbar_h_adjust : float
Option to adjust location of colorbar horizontally. Positive values
move it to the right, negative values move it to the left.
secondary_y : boolean
Option to plot on secondary y axis.
y_axis_flag_meanings : boolean or int
When set to True and plotting a state variable with flag_values and
flag_meanings attributes, the y-axis numerical values will be replaced
with the flag_meanings values. Set to a positive number larger than 1
to indicate the maximum word length to use. If text is longer than the
value and has space characters, it will be split over multiple lines.
colorbar_labels : dict
A dictionary containing values for plotting a 2D array of state variables.
The dictionary uses data values as keys and a dictionary containing keys
'text' and 'color' for each data value to plot.
Example:
{0: {'text': 'Clear sky', 'color': 'white'},
1: {'text': 'Liquid', 'color': 'green'},
2: {'text': 'Ice', 'color': 'blue'},
3: {'text': 'Mixed phase', 'color': 'purple'}}
**kwargs : keyword arguments
The keyword arguments for :func:`plt.plot` (1D timeseries) or
:func:`plt.pcolormesh` (2D timeseries).
Returns
-------
ax : matplotlib axis handle
The matplotlib axis handle of the plot.
"""
if dsname is None and len(self._obj.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._obj.keys())[0]
if y_axis_flag_meanings:
kwargs['linestyle'] = ''
# Get data and dimensions
data = self._obj[dsname][field]
dim = list(self._obj[dsname][field].dims)
xdata = self._obj[dsname][dim[0]]
if 'units' in data.attrs:
ytitle = ''.join(['(', data.attrs['units'], ')'])
else:
ytitle = field
if cbar_label is None:
cbar_default = ytitle
if len(dim) > 1:
if use_var_for_y is None:
ydata = self._obj[dsname][dim[1]]
else:
ydata = self._obj[dsname][use_var_for_y]
ydata_dim1 = self._obj[dsname][dim[1]]
if np.shape(ydata) != np.shape(ydata_dim1):
ydata = ydata_dim1
units = ytitle
if 'units' in ydata.attrs.keys():
units = ydata.attrs['units']
ytitle = ''.join(['(', units, ')'])
else:
units = ''
ytitle = dim[1]
# Create labels if 2d as 1d
if force_line_plot is True:
if labels is True:
labels = [' '.join([str(d), units]) for d in ydata.values]
ytitle = f"({data.attrs['units']})"
ydata = None
else:
ydata = None
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
# Set up secondary y axis if requested
if secondary_y is False:
ax = self.axes[subplot_index]
else:
ax = self.axes[subplot_index].twinx()
if colorbar_labels is not None:
flag_values = list(colorbar_labels.keys())
flag_meanings = [value['text'] for key, value in colorbar_labels.items()]
cbar_colors = [value['color'] for key, value in colorbar_labels.items()]
cmap = mpl.colors.ListedColormap(cbar_colors)
for ii, flag_meaning in enumerate(flag_meanings):
if len(flag_meaning) > 20:
flag_meaning = textwrap.fill(flag_meaning, width=20)
flag_meanings[ii] = flag_meaning
else:
flag_values = None
flag_meanings = None
cbar_colors = None
if ydata is None:
# Add in nans to ensure the data does not connect the line.
if add_nan is True:
xdata, data = data_utils.add_in_nan(xdata, data)
if day_night_background is True:
self.day_night_background(subplot_index=subplot_index, dsname=dsname)
# If limiting data being plotted use masked arrays
# Need to do it this way because of autoscale() method
if abs_limits[0] is not None and abs_limits[1] is not None:
data = np.ma.masked_outside(data, abs_limits[0], abs_limits[1])
elif abs_limits[0] is not None and abs_limits[1] is None:
data = np.ma.masked_less_equal(data, abs_limits[0])
elif abs_limits[0] is None and abs_limits[1] is not None:
data = np.ma.masked_greater_equal(data, abs_limits[1])
# Plot the data
if 'marker' not in kwargs.keys():
kwargs['marker'] = '.'
lines = ax.plot(xdata, data, **kwargs)
# Check if we need to call legend method after plotting. This is only
# called when no assessment overplot is called.
add_legend = False
if 'label' in kwargs.keys():
add_legend = True
# Overplot failing data if requested
if assessment_overplot:
# If we are doing forced line plot from 2D data need to manage
# legend labels. Will make arrays to hold labels of QC failing
# because not set when labels not set.
if not isinstance(labels, list) and add_legend is False:
labels = []
lines = []
# For forced line plot need to plot QC behind point instead of
# on top of point.
zorder = None
if force_line_plot or overplot_behind:
zorder = 0
overplot_markersize *= 2.0
for assessment, categories in assessment_overplot_category.items():
flag_data = self._obj[dsname].qcfilter.get_masked_data(
field, rm_assessments=categories, return_inverse=True
)
if np.invert(flag_data.mask).any() and np.isfinite(flag_data).any():
try:
flag_data.mask = np.logical_or(data.mask, flag_data.mask)
except AttributeError:
pass
qc_ax = ax.plot(
xdata,
flag_data,
marker=overplot_marker,
linestyle='',
markersize=overplot_markersize,
color=assessment_overplot_category_color[assessment],
label=assessment,
zorder=zorder,
)
# If labels keyword is set need to add labels for calling legend
if isinstance(labels, list):
# If plotting forced_line_plot need to subset the Line2D object
# so we don't have more than one added to legend.
if len(qc_ax) > 1:
lines.extend(qc_ax[:1])
else:
lines.extend(qc_ax)
labels.append(assessment)
add_legend = True
# Add legend if labels are available
if isinstance(labels, list):
ax.legend(lines, labels)
elif add_legend:
ax.legend()
# Change y axis to text from flag_meanings if requested.
if y_axis_flag_meanings:
flag_meanings = self._obj[dsname][field].attrs['flag_meanings']
flag_values = self._obj[dsname][field].attrs['flag_values']
# If keyword is larger than 1 assume this is the maximum character length
# desired and insert returns to wrap text.
if y_axis_flag_meanings > 1:
for ii, flag_meaning in enumerate(flag_meanings):
if len(flag_meaning) > y_axis_flag_meanings:
flag_meaning = textwrap.fill(flag_meaning, width=y_axis_flag_meanings)
flag_meanings[ii] = flag_meaning
ax.set_yticks(flag_values)
ax.set_yticklabels(flag_meanings)
else:
# Add in nans to ensure the data are not streaking
if add_nan is True:
xdata, data = data_utils.add_in_nan(xdata, data)
# Sets shading parameter to auto. Matplotlib will check dimensions.
# If X, Y and C are the same dimensions shading is set to nearest.
# If X and Y dimensions are 1 greater than C shading is set to flat.
if 'edgecolors' not in kwargs.keys():
kwargs['edgecolors'] = 'face'
mesh = ax.pcolormesh(
np.asarray(xdata),
ydata,
data.transpose(),
shading=set_shading,
cmap=cmap,
**kwargs,
)
# Set Title
if set_title is None:
if isinstance(self._obj[dsname].time.values[0], np.datetime64):
set_title = ' '.join(
[
dsname,
field,
'on',
dt_utils.numpy_to_arm_date(self._obj[dsname].time.values[0]),
]
)
else:
date_result = search(
r'\d{4}-\d{1,2}-\d{1,2}', self._obj[dsname].time.attrs['units']
)
if date_result is not None:
set_title = ' '.join([dsname, field, 'on', date_result.group(0)])
else:
set_title = ' '.join([dsname, field])
if secondary_y is False:
ax.set_title(set_title)
# Set YTitle
if not y_axis_flag_meanings:
ax.set_ylabel(ytitle)
# Set X Limit - We want the same time axes for all subplots
if not hasattr(self, 'time_rng'):
if time_rng is not None:
self.time_rng = list(time_rng)
else:
self.time_rng = [xdata.min().values, xdata.max().values]
self.set_xrng(self.time_rng, subplot_index)
# Set Y Limit
if y_rng is not None:
self.set_yrng(y_rng)
if hasattr(self, 'yrng'):
# Make sure that the yrng is not just the default
if ydata is None:
if abs_limits[0] is not None or abs_limits[1] is not None:
our_data = data
else:
our_data = data.values
else:
our_data = ydata
finite = np.isfinite(our_data)
if finite.any():
our_data = our_data[finite]
if invert_y_axis is False:
yrng = [np.min(our_data), np.max(our_data)]
else:
yrng = [np.max(our_data), np.min(our_data)]
else:
yrng = [0, 1]
            # Check if current range is outside of new range and only set
            # values that work for all data plotted.
if isinstance(yrng[0], np.datetime64):
yrng = mdates.datestr2num([str(yrng[0]), str(yrng[1])])
current_yrng = ax.get_ylim()
if yrng[0] > current_yrng[0]:
yrng[0] = current_yrng[0]
if yrng[1] < current_yrng[1]:
yrng[1] = current_yrng[1]
# Set y range the normal way if not secondary y
# If secondary, just use set_ylim
if secondary_y is False:
self.set_yrng(yrng, subplot_index)
else:
ax.set_ylim(yrng)
# Set X Format
if len(subplot_index) == 1:
days = self.xrng[subplot_index, 1] - self.xrng[subplot_index, 0]
else:
days = (
self.xrng[subplot_index[0], subplot_index[1], 1]
- self.xrng[subplot_index[0], subplot_index[1], 0]
)
myFmt = common.get_date_format(days)
ax.xaxis.set_major_formatter(myFmt)
# Set X format - We want the same time axes for all subplots
if not hasattr(self, 'time_fmt'):
self.time_fmt = myFmt
# Put on an xlabel, but only if we are making the bottom-most plot
if subplot_index[0] == self.axes.shape[0] - 1:
ax.set_xlabel('Time [UTC]')
if ydata is not None:
if cbar_label is None:
cbar_title = cbar_default
else:
cbar_title = ''.join(['(', cbar_label, ')'])
if colorbar_labels is not None:
cbar_title = None
cbar = self.add_colorbar(mesh, title=cbar_title, subplot_index=subplot_index,
values=flag_values, pad=cbar_h_adjust)
cbar.set_ticks(flag_values)
cbar.set_ticklabels(flag_meanings)
cbar.ax.tick_params(labelsize=10)
else:
self.add_colorbar(mesh, title=cbar_title, subplot_index=subplot_index,
pad=cbar_h_adjust)
return ax
def plot_barbs_from_spd_dir(
self, speed_field, direction_field, pres_field=None, dsname=None, **kwargs
):
"""
This procedure will make a wind barb plot timeseries.
If a pressure field is given and the wind fields are 1D, which, for
example, would occur if one wants to plot a timeseries of
rawinsonde data, then a time-height cross section of
winds will be made.
Note: This procedure calls plot_barbs_from_u_v and will take in the
same keyword arguments as that procedure.
Parameters
----------
speed_field : str
The name of the field specifying the wind speed in m/s.
direction_field : str
The name of the field specifying the wind direction in degrees.
0 degrees is defined to be north and increases clockwise like
what is used in standard meteorological notation.
pres_field : str
The name of the field specifying pressure or height. If using
height coordinates, then we recommend setting invert_y_axis
to False.
dsname : str
The name of the datastream to plot. Setting to None will make
ACT attempt to autodetect this.
kwargs : dict
Any additional keyword arguments will be passed into
            :func:`act.plotting.TimeSeriesDisplay.plot_barbs_from_u_v`.
Returns
-------
the_ax : matplotlib axis handle
The handle to the axis where the plot was made on.
Examples
--------
        .. code-block:: python
sonde_ds = act.io.armfiles.read_netcdf(
act.tests.sample_files.EXAMPLE_TWP_SONDE_WILDCARD)
BarbDisplay = act.plotting.TimeSeriesDisplay(
{'sonde_darwin': sonde_ds}, figsize=(10,5))
BarbDisplay.plot_barbs_from_spd_dir('deg', 'wspd', 'pres',
num_barbs_x=20)
"""
if dsname is None and len(self._obj.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._obj.keys())[0]
# Make temporary field called tempu, tempv
spd = self._obj[dsname][speed_field]
dir = self._obj[dsname][direction_field]
tempu = -np.sin(np.deg2rad(dir)) * spd
tempv = -np.cos(np.deg2rad(dir)) * spd
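        # Standard meteorological convention: direction is where the wind
        # comes from, so a northerly wind (dir = 0 deg) gives u = 0, v = -spd.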
self._obj[dsname]['temp_u'] = deepcopy(self._obj[dsname][speed_field])
self._obj[dsname]['temp_v'] = deepcopy(self._obj[dsname][speed_field])
self._obj[dsname]['temp_u'].values = tempu
self._obj[dsname]['temp_v'].values = tempv
the_ax = self.plot_barbs_from_u_v('temp_u', 'temp_v', pres_field, dsname, **kwargs)
del self._obj[dsname]['temp_u'], self._obj[dsname]['temp_v']
return the_ax
def plot_barbs_from_u_v(
self,
u_field,
v_field,
pres_field=None,
dsname=None,
subplot_index=(0,),
set_title=None,
day_night_background=False,
invert_y_axis=True,
num_barbs_x=20,
num_barbs_y=20,
use_var_for_y=None,
**kwargs,
):
"""
This function will plot a wind barb timeseries from u and v wind
data. If pres_field is given, a time-height series will be plotted
from 1-D wind data.
Parameters
----------
u_field : str
The name of the field containing the U component of the wind.
v_field : str
The name of the field containing the V component of the wind.
pres_field : str or None
The name of the field containing the pressure or height. Set
to None to not use this.
dsname : str or None
The name of the datastream to plot. Setting to None will make
ACT automatically try to determine this.
subplot_index : 2-tuple
The index of the subplot to make the plot on.
set_title : str or None
The title of the plot.
day_night_background : bool
Set to True to plot a day/night background.
invert_y_axis : bool
Set to True to invert the y axis (i.e. for plotting pressure as
the height coordinate).
num_barbs_x : int
The number of wind barbs to plot in the x axis.
num_barbs_y : int
The number of wind barbs to plot in the y axis.
cmap : matplotlib.colors.LinearSegmentedColormap
A color map to use with wind barbs. If this is set the plt.barbs
routine will be passed the C parameter scaled as sqrt of sum of the
squares and used with the passed in color map. A colorbar will also
be added. Setting the limits of the colorbar can be done with 'clim'.
Setting this changes the wind barbs from black to colors.
use_var_for_y : str
Set this to the name of a data variable in the Dataset to use as the
y-axis variable instead of the default dimension. Useful for instances
where data has an index-based dimension instead of a height-based
dimension. If shapes of arrays do not match it will automatically
revert back to the original ydata.
**kwargs : keyword arguments
Additional keyword arguments will be passed into plt.barbs.
Returns
-------
ax : matplotlib axis handle
The axis handle that contains the reference to the
constructed plot.
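        Examples
        --------
        A minimal usage sketch; the datastream and field names below are
        illustrative placeholders rather than values from a real file.
        .. code-block:: python
            ds = act.io.armfiles.read_netcdf(sonde_file)
            display = act.plotting.TimeSeriesDisplay({'sonde': ds}, figsize=(10, 5))
            display.plot_barbs_from_u_v('u_wind', 'v_wind', 'pres',
                num_barbs_x=20)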
"""
if dsname is None and len(self._obj.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._obj.keys())[0]
# Get data and dimensions
u = self._obj[dsname][u_field].values
v = self._obj[dsname][v_field].values
dim = list(self._obj[dsname][u_field].dims)
xdata = self._obj[dsname][dim[0]].values
num_x = xdata.shape[-1]
barb_step_x = round(num_x / num_barbs_x)
if barb_step_x == 0:
barb_step_x = 1
if len(dim) > 1 and pres_field is None:
if use_var_for_y is None:
ydata = self._obj[dsname][dim[1]]
else:
ydata = self._obj[dsname][use_var_for_y]
ydata_dim1 = self._obj[dsname][dim[1]]
if np.shape(ydata) != np.shape(ydata_dim1):
ydata = ydata_dim1
if 'units' in ydata.attrs:
units = ydata.attrs['units']
else:
units = ''
ytitle = ''.join(['(', units, ')'])
num_y = ydata.shape[0]
barb_step_y = round(num_y / num_barbs_y)
if barb_step_y == 0:
barb_step_y = 1
xdata, ydata = np.meshgrid(xdata, ydata, indexing='ij')
elif pres_field is not None:
# What we will do here is do a nearest-neighbor interpolation
# for each member of the series. Coordinates are time, pressure
pres = self._obj[dsname][pres_field]
u_interp = NearestNDInterpolator((xdata, pres.values), u, rescale=True)
v_interp = NearestNDInterpolator((xdata, pres.values), v, rescale=True)
barb_step_x = 1
barb_step_y = 1
x_times = pd.date_range(xdata.min(), xdata.max(), periods=num_barbs_x)
if num_barbs_y == 1:
y_levels = pres.mean()
else:
y_levels = np.linspace(np.nanmin(pres), np.nanmax(pres), num_barbs_y)
xdata, ydata = np.meshgrid(x_times, y_levels, indexing='ij')
u = u_interp(xdata, ydata)
v = v_interp(xdata, ydata)
if 'units' in pres.attrs:
units = pres.attrs['units']
else:
units = ''
ytitle = ''.join(['(', units, ')'])
else:
ydata = None
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
if ydata is None:
ydata = np.ones(xdata.shape)
if 'cmap' in kwargs.keys():
map_color = np.sqrt(np.power(u[::barb_step_x], 2) + np.power(v[::barb_step_x], 2))
map_color[np.isnan(map_color)] = 0
ax = self.axes[subplot_index].barbs(
xdata[::barb_step_x],
ydata[::barb_step_x],
u[::barb_step_x],
v[::barb_step_x],
map_color,
**kwargs,
)
plt.colorbar(
ax,
ax=[self.axes[subplot_index]],
label='Wind Speed (' + self._obj[dsname][u_field].attrs['units'] + ')',
)
else:
self.axes[subplot_index].barbs(
xdata[::barb_step_x],
ydata[::barb_step_x],
u[::barb_step_x],
v[::barb_step_x],
**kwargs,
)
self.axes[subplot_index].set_yticks([])
else:
if 'cmap' in kwargs.keys():
map_color = np.sqrt(
np.power(u[::barb_step_x, ::barb_step_y], 2)
+ np.power(v[::barb_step_x, ::barb_step_y], 2)
)
map_color[np.isnan(map_color)] = 0
ax = self.axes[subplot_index].barbs(
xdata[::barb_step_x, ::barb_step_y],
ydata[::barb_step_x, ::barb_step_y],
u[::barb_step_x, ::barb_step_y],
v[::barb_step_x, ::barb_step_y],
map_color,
**kwargs,
)
plt.colorbar(
ax,
ax=[self.axes[subplot_index]],
label='Wind Speed (' + self._obj[dsname][u_field].attrs['units'] + ')',
)
else:
ax = self.axes[subplot_index].barbs(
xdata[::barb_step_x, ::barb_step_y],
ydata[::barb_step_x, ::barb_step_y],
u[::barb_step_x, ::barb_step_y],
v[::barb_step_x, ::barb_step_y],
**kwargs,
)
if day_night_background is True:
self.day_night_background(subplot_index=subplot_index, dsname=dsname)
# Set Title
if set_title is None:
set_title = ' '.join(
[
dsname,
'on',
dt_utils.numpy_to_arm_date(self._obj[dsname].time.values[0]),
]
)
self.axes[subplot_index].set_title(set_title)
# Set YTitle
if 'ytitle' in locals():
self.axes[subplot_index].set_ylabel(ytitle)
# Set X Limit - We want the same time axes for all subplots
time_rng = [xdata.min(), xdata.max()]
self.set_xrng(time_rng, subplot_index)
# Set Y Limit
if hasattr(self, 'yrng'):
# Make sure that the yrng is not just the default
if not np.all(self.yrng[subplot_index] == 0):
self.set_yrng(self.yrng[subplot_index], subplot_index)
else:
if ydata is None:
our_data = xdata
else:
our_data = ydata
if np.isfinite(our_data).any():
if invert_y_axis is False:
yrng = [np.nanmin(our_data), np.nanmax(our_data)]
else:
yrng = [np.nanmax(our_data), np.nanmin(our_data)]
else:
yrng = [0, 1]
self.set_yrng(yrng, subplot_index)
# Set X Format
if len(subplot_index) == 1:
days = self.xrng[subplot_index, 1] - self.xrng[subplot_index, 0]
else:
days = (
self.xrng[subplot_index[0], subplot_index[1], 1]
- self.xrng[subplot_index[0], subplot_index[1], 0]
)
# Put on an xlabel, but only if we are making the bottom-most plot
if subplot_index[0] == self.axes.shape[0] - 1:
self.axes[subplot_index].set_xlabel('Time [UTC]')
myFmt = common.get_date_format(days)
self.axes[subplot_index].xaxis.set_major_formatter(myFmt)
return self.axes[subplot_index]
def plot_time_height_xsection_from_1d_data(
self,
data_field,
pres_field,
dsname=None,
subplot_index=(0,),
set_title=None,
day_night_background=False,
num_time_periods=20,
num_y_levels=20,
invert_y_axis=True,
cbar_label=None,
set_shading='auto',
**kwargs,
):
"""
This will plot a time-height cross section from 1D datasets using
nearest neighbor interpolation on a regular time by height grid.
All that is needed are a data variable and a height variable.
Parameters
----------
data_field : str
The name of the field to plot.
pres_field : str
The name of the height or pressure field to plot.
dsname : str or None
The name of the datastream to plot
subplot_index : 2-tuple
The index of the subplot to create the plot on.
set_title : str or None
The title of the plot.
day_night_background : bool
Set to true to plot the day/night background.
num_time_periods : int
Set to determine how many time periods. Setting to None
will do one time period per day.
num_y_levels : int
The number of levels in the y axis to use.
invert_y_axis : bool
Set to true to invert the y-axis (recommended for
pressure coordinates).
cbar_label : str
Option to overwrite default colorbar label.
set_shading : string
            Option to set the matplotlib.pcolormesh shading parameter.
            Default is 'auto'.
**kwargs : keyword arguments
Additional keyword arguments will be passed
into :func:`plt.pcolormesh`
Returns
-------
ax : matplotlib axis handle
The matplotlib axis handle pointing to the plot.
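        Examples
        --------
        A minimal usage sketch; the datastream and field names below are
        illustrative placeholders.
        .. code-block:: python
            ds = act.io.armfiles.read_netcdf(sonde_file)
            display = act.plotting.TimeSeriesDisplay({'sonde': ds}, figsize=(8, 6))
            display.plot_time_height_xsection_from_1d_data(
                'tdry', 'alt', num_time_periods=25, num_y_levels=25)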
"""
if dsname is None and len(self._obj.keys()) > 1:
raise ValueError(
                'You must choose a datastream when there are 2 '
                'or more datasets in the TimeSeriesDisplay '
                'object.'
)
elif dsname is None:
dsname = list(self._obj.keys())[0]
dim = list(self._obj[dsname][data_field].dims)
if len(dim) > 1:
raise ValueError(
'plot_time_height_xsection_from_1d_data only '
'supports 1-D datasets. For datasets with 2 or '
'more dimensions use plot().'
)
# Get data and dimensions
data = self._obj[dsname][data_field].values
xdata = self._obj[dsname][dim[0]].values
# What we will do here is do a nearest-neighbor interpolation for each
# member of the series. Coordinates are time, pressure
pres = self._obj[dsname][pres_field]
u_interp = NearestNDInterpolator((xdata, pres.values), data, rescale=True)
# Mask points where we have no data
# Count number of unique days
x_times = pd.date_range(xdata.min(), xdata.max(), periods=num_time_periods)
y_levels = np.linspace(np.nanmin(pres), np.nanmax(pres), num_y_levels)
tdata, ydata = np.meshgrid(x_times, y_levels, indexing='ij')
data = u_interp(tdata, ydata)
ytitle = ''.join(['(', pres.attrs['units'], ')'])
units = data_field + ' (' + self._obj[dsname][data_field].attrs['units'] + ')'
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
mesh = self.axes[subplot_index].pcolormesh(
x_times, y_levels, np.transpose(data), shading=set_shading, **kwargs
)
if day_night_background is True:
self.day_night_background(subplot_index=subplot_index, dsname=dsname)
# Set Title
if set_title is None:
set_title = ' '.join(
[
dsname,
'on',
dt_utils.numpy_to_arm_date(self._obj[dsname].time.values[0]),
]
)
self.axes[subplot_index].set_title(set_title)
# Set YTitle
if 'ytitle' in locals():
self.axes[subplot_index].set_ylabel(ytitle)
# Set X Limit - We want the same time axes for all subplots
time_rng = [x_times[-1], x_times[0]]
self.set_xrng(time_rng, subplot_index)
# Set Y Limit
if hasattr(self, 'yrng'):
# Make sure that the yrng is not just the default
if not np.all(self.yrng[subplot_index] == 0):
self.set_yrng(self.yrng[subplot_index], subplot_index)
else:
if ydata is None:
our_data = data.values
else:
our_data = ydata
if np.isfinite(our_data).any():
if invert_y_axis is False:
yrng = [np.nanmin(our_data), np.nanmax(our_data)]
else:
yrng = [np.nanmax(our_data), np.nanmin(our_data)]
else:
yrng = [0, 1]
self.set_yrng(yrng, subplot_index)
# Set X Format
if len(subplot_index) == 1:
days = self.xrng[subplot_index, 1] - self.xrng[subplot_index, 0]
else:
days = (
self.xrng[subplot_index[0], subplot_index[1], 1]
- self.xrng[subplot_index[0], subplot_index[1], 0]
)
# Put on an xlabel, but only if we are making the bottom-most plot
if subplot_index[0] == self.axes.shape[0] - 1:
self.axes[subplot_index].set_xlabel('Time [UTC]')
if ydata is not None:
if cbar_label is None:
self.add_colorbar(mesh, title=units, subplot_index=subplot_index)
else:
self.add_colorbar(mesh, title=cbar_label, subplot_index=subplot_index)
myFmt = common.get_date_format(days)
self.axes[subplot_index].xaxis.set_major_formatter(myFmt)
return self.axes[subplot_index]
def time_height_scatter(
self,
data_field=None,
dsname=None,
cmap='rainbow',
alt_label=None,
alt_field='alt',
cb_label=None,
**kwargs,
):
"""
Create a time series plot of altitude and data variable with
color also indicating value with a color bar. The Color bar is
positioned to serve both as the indicator of the color intensity
and the second y-axis.
Parameters
----------
data_field : str
Name of data field in the object to plot on second y-axis.
height_field : str
Name of height field in the object to plot on first y-axis.
dsname : str or None
The name of the datastream to plot.
cmap : str
Colorbar color map to use.
alt_label : str
Altitude first y-axis label to use. If None, will try to use
long_name and units.
alt_field : str
Label for field in the object to plot on first y-axis.
cb_label : str
Colorbar label to use. If not set will try to use
long_name and units.
**kwargs : keyword arguments
Any other keyword arguments that will be passed
into TimeSeriesDisplay.plot module when the figure
is made.
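        Examples
        --------
        A minimal usage sketch; the datastream and field names below are
        illustrative placeholders.
        .. code-block:: python
            ds = act.io.armfiles.read_netcdf(sonde_file)
            display = act.plotting.TimeSeriesDisplay({'sonde': ds}, figsize=(8, 6))
            display.time_height_scatter('tdry', alt_field='alt')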
"""
if dsname is None and len(self._obj.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._obj.keys())[0]
# Get data and dimensions
data = self._obj[dsname][data_field]
altitude = self._obj[dsname][alt_field]
dim = list(self._obj[dsname][data_field].dims)
xdata = self._obj[dsname][dim[0]]
if alt_label is None:
try:
alt_label = altitude.attrs['long_name'] + ''.join(
[' (', altitude.attrs['units'], ')']
)
except KeyError:
alt_label = alt_field
if cb_label is None:
try:
cb_label = data.attrs['long_name'] + ''.join([' (', data.attrs['units'], ')'])
except KeyError:
cb_label = data_field
colorbar_map = plt.cm.get_cmap(cmap)
self.fig.subplots_adjust(left=0.1, right=0.86, bottom=0.16, top=0.91)
ax1 = self.plot(alt_field, color='black', **kwargs)
ax1.set_ylabel(alt_label)
ax2 = ax1.twinx()
sc = ax2.scatter(xdata.values, data.values, c=data.values, marker='.', cmap=colorbar_map)
cbaxes = self.fig.add_axes(
[
self.fig.subplotpars.right + 0.02,
self.fig.subplotpars.bottom,
0.02,
self.fig.subplotpars.top - self.fig.subplotpars.bottom,
]
)
cbar = plt.colorbar(sc, cax=cbaxes)
ax2.set_ylim(cbar.mappable.get_clim())
cbar.ax.set_ylabel(cb_label)
ax2.set_yticklabels([])
return self.axes[0]
def qc_flag_block_plot(
self,
data_field=None,
dsname=None,
subplot_index=(0,),
time_rng=None,
assessment_color=None,
edgecolor='face',
set_shading='auto',
**kwargs,
):
"""
Create a time series plot of embedded quality control values
using broken barh plotting.
Parameters
----------
data_field : str
Name of data field in the object to plot corresponding quality
control.
dsname : None or str
If there is more than one datastream in the display object the
name of the datastream needs to be specified. If set to None and
there is only one datastream ACT will use the sole datastream
in the object.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
time_rng : tuple or list
List or tuple with (min, max) values to set the x-axis range limits.
assessment_color : dict
            Dictionary lookup to override the default assessment-to-color mapping.
            Make sure the assessment word is spelled with matching case.
edgecolor : str or list
Color name, list of color names or 'face' as defined in matplotlib.axes.Axes.broken_barh
set_shading : string
            Option to set the matplotlib.pcolormesh shading parameter.
            Default is 'auto'.
**kwargs : keyword arguments
The keyword arguments for :func:`plt.broken_barh`.
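        Examples
        --------
        A minimal usage sketch; the datastream and field names below are
        illustrative placeholders. The variable is expected to already have
        an ancillary quality control variable in the Dataset.
        .. code-block:: python
            ds = act.io.armfiles.read_netcdf(met_file)
            display = act.plotting.TimeSeriesDisplay({'met': ds}, figsize=(12, 4))
            display.qc_flag_block_plot('temp_mean', subplot_index=(0,))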
"""
# Color to plot associated with assessment.
color_lookup = {
'Bad': 'red',
'Incorrect': 'red',
'Indeterminate': 'orange',
'Suspect': 'orange',
'Missing': 'darkgray',
'Not Failing': 'green',
'Acceptable': 'green',
}
if assessment_color is not None:
for asses, color in assessment_color.items():
color_lookup[asses] = color
if asses == 'Incorrect':
color_lookup['Bad'] = color
if asses == 'Suspect':
color_lookup['Indeterminate'] = color
# Set up list of test names to use for missing values
missing_val_long_names = [
'Value equal to missing_value*',
'Value set to missing_value*',
'Value is equal to missing_value*',
'Value is set to missing_value*',
]
if dsname is None and len(self._obj.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._obj.keys())[0]
# Set up or get current plot figure
if self.fig is None:
self.fig = plt.figure()
# Set up or get current axes
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
ax = self.axes[subplot_index]
# Set X Limit - We want the same time axes for all subplots
data = self._obj[dsname][data_field]
dim = list(self._obj[dsname][data_field].dims)
xdata = self._obj[dsname][dim[0]]
# Get data and attributes
qc_data_field = self._obj[dsname].qcfilter.check_for_ancillary_qc(
data_field, add_if_missing=False, cleanup=False
)
if qc_data_field is None:
raise ValueError(f'No quality control ancillary variable in Dataset for {data_field}')
flag_masks = self._obj[dsname][qc_data_field].attrs['flag_masks']
flag_meanings = self._obj[dsname][qc_data_field].attrs['flag_meanings']
flag_assessments = self._obj[dsname][qc_data_field].attrs['flag_assessments']
# Get time ranges for green blocks
time_delta = determine_time_delta(xdata.values)
barh_list_green = reduce_time_ranges(xdata.values, time_delta=time_delta, broken_barh=True)
# Set background to gray indicating not available data
ax.set_facecolor('dimgray')
# Check if plotting 2D data vs 1D data. 2D data will be summarized by
# assessment category instead of showing each test.
data_shape = self._obj[dsname][qc_data_field].shape
if len(data_shape) > 1:
cur_assessments = list(set(flag_assessments))
cur_assessments.sort()
cur_assessments.reverse()
qc_data = np.full(data_shape, -1, dtype=np.int16)
plot_colors = []
tick_names = []
index = self._obj[dsname][qc_data_field].values == 0
if index.any():
qc_data[index] = 0
plot_colors.append(color_lookup['Not Failing'])
tick_names.append('Not Failing')
for ii, assess in enumerate(cur_assessments):
if assess not in color_lookup:
color_lookup[assess] = list(mplcolors.CSS4_COLORS.keys())[ii]
ii += 1
assess_data = self._obj[dsname].qcfilter.get_masked_data(
data_field, rm_assessments=assess
)
if assess_data.mask.any():
qc_data[assess_data.mask] = ii
plot_colors.append(color_lookup[assess])
tick_names.append(assess)
# Overwrite missing data. Not sure if we want to do this because VAPs set
# the value to missing but the test is set to Bad. This tries to overcome that
# by looking for correct test description that would only indicate the values
# are missing not that they are set to missing by a test... most likely.
missing_test_nums = []
for ii, flag_meaning in enumerate(flag_meanings):
# Check if the bit set is indicating missing data.
for val in missing_val_long_names:
if re_search(val, flag_meaning):
test_num = parse_bit(flag_masks[ii])[0]
missing_test_nums.append(test_num)
assess_data = self._obj[dsname].qcfilter.get_masked_data(
data_field, rm_tests=missing_test_nums
)
if assess_data.mask.any():
qc_data[assess_data.mask] = -1
plot_colors.append(color_lookup['Missing'])
tick_names.append('Missing')
# Create a masked array to allow not plotting where values are missing
qc_data = np.ma.masked_equal(qc_data, -1)
dims = self._obj[dsname][qc_data_field].dims
xvalues = self._obj[dsname][dims[0]].values
yvalues = self._obj[dsname][dims[1]].values
cMap = mplcolors.ListedColormap(plot_colors)
mesh = ax.pcolormesh(
xvalues,
yvalues,
np.transpose(qc_data),
cmap=cMap,
vmin=0,
shading=set_shading,
)
divider = make_axes_locatable(ax)
# Determine correct placement of words on colorbar
tick_nums = (
np.arange(0, len(tick_names) * 2 + 1) / (len(tick_names) * 2) * np.nanmax(qc_data)
)[1::2]
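            # e.g. with three tick names this places ticks at 1/6, 3/6 and 5/6
            # of the colorbar range, i.e. at the center of each color block.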
cax = divider.append_axes('bottom', size='5%', pad=0.3)
cbar = self.fig.colorbar(
mesh,
cax=cax,
orientation='horizontal',
spacing='uniform',
ticks=tick_nums,
shrink=0.5,
)
cbar.ax.set_xticklabels(tick_names)
# Set YTitle
dim_name = list(set(self._obj[dsname][qc_data_field].dims) - {'time'})
try:
ytitle = f"{dim_name[0]} ({self._obj[dsname][dim_name[0]].attrs['units']})"
ax.set_ylabel(ytitle)
except KeyError:
pass
# Add which tests were set as text to the plot
unique_values = []
for ii in np.unique(self._obj[dsname][qc_data_field].values):
unique_values.extend(parse_bit(ii))
if len(unique_values) > 0:
unique_values = list(set(unique_values))
unique_values.sort()
unique_values = [str(ii) for ii in unique_values]
self.fig.text(
0.5,
-0.35,
f"QC Tests Tripped: {', '.join(unique_values)}",
transform=ax.transAxes,
horizontalalignment='center',
verticalalignment='center',
fontweight='bold',
)
else:
test_nums = []
for ii, assess in enumerate(flag_assessments):
if assess not in color_lookup:
color_lookup[assess] = list(mplcolors.CSS4_COLORS.keys())[ii]
# Plot green data first.
ax.broken_barh(
barh_list_green,
(ii, ii + 1),
facecolors=color_lookup['Not Failing'],
edgecolor=edgecolor,
**kwargs,
)
# Get test number from flag_mask bitpacked number
test_nums.append(parse_bit(flag_masks[ii]))
# Get masked array data to use mask for finding if/where test is set
data = self._obj[dsname].qcfilter.get_masked_data(
data_field, rm_tests=test_nums[-1]
)
if np.any(data.mask):
# Get time ranges from time and masked data
barh_list = reduce_time_ranges(
xdata.values[data.mask], time_delta=time_delta, broken_barh=True
)
# Check if the bit set is indicating missing data. If so change
# to different plotting color than what is in flag_assessments.
for val in missing_val_long_names:
if re_search(val, flag_meanings[ii]):
assess = 'Missing'
break
# Lay down blocks of tripped tests using correct color
ax.broken_barh(
barh_list,
(ii, ii + 1),
facecolors=color_lookup[assess],
edgecolor=edgecolor,
**kwargs,
)
# Add test description to plot.
ax.text(xdata.values[0], ii + 0.5, ' ' + flag_meanings[ii], va='center')
# Change y ticks to test number
plt.yticks(
[ii + 0.5 for ii in range(0, len(test_nums))],
labels=['Test ' + str(ii[0]) for ii in test_nums],
)
# Set ylimit to number of tests plotted
ax.set_ylim(0, len(flag_assessments))
# Set X Limit - We want the same time axes for all subplots
if not hasattr(self, 'time_rng'):
if time_rng is not None:
self.time_rng = list(time_rng)
else:
self.time_rng = [xdata.min().values, xdata.max().values]
self.set_xrng(self.time_rng, subplot_index)
# Get X format - We want the same time axes for all subplots
if hasattr(self, 'time_fmt'):
ax.xaxis.set_major_formatter(self.time_fmt)
else:
# Set X Format
if len(subplot_index) == 1:
days = self.xrng[subplot_index, 1] - self.xrng[subplot_index, 0]
else:
days = (
self.xrng[subplot_index[0], subplot_index[1], 1]
- self.xrng[subplot_index[0], subplot_index[1], 0]
)
myFmt = common.get_date_format(days)
ax.xaxis.set_major_formatter(myFmt)
self.time_fmt = myFmt
return self.axes[subplot_index]
def fill_between(
self,
field,
dsname=None,
subplot_index=(0,),
set_title=None,
secondary_y=False,
**kwargs,
):
"""
Makes a fill_between plot, based on matplotlib
Parameters
----------
field : str
The name of the field to plot.
dsname : None or str
If there is more than one datastream in the display object the
name of the datastream needs to be specified. If set to None and
there is only one datastream ACT will use the sole datastream
in the object.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
set_title : str
The title for the plot.
secondary_y : boolean
Option to indicate if the data should be plotted on second y-axis.
**kwargs : keyword arguments
The keyword arguments for :func:`plt.plot` (1D timeseries) or
:func:`plt.pcolormesh` (2D timeseries).
Returns
-------
ax : matplotlib axis handle
The matplotlib axis handle of the plot.
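        Examples
        --------
        A minimal usage sketch; the datastream and field names below are
        illustrative placeholders.
        .. code-block:: python
            display = act.plotting.TimeSeriesDisplay({'met': ds}, figsize=(10, 4))
            display.plot('precip_accum')
            display.fill_between('precip_accum', alpha=0.3)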
"""
if dsname is None and len(self._obj.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._obj.keys())[0]
# Get data and dimensions
data = self._obj[dsname][field]
dim = list(self._obj[dsname][field].dims)
xdata = self._obj[dsname][dim[0]]
if 'units' in data.attrs:
ytitle = ''.join(['(', data.attrs['units'], ')'])
else:
ytitle = field
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
# Set ax to appropriate axis
if secondary_y is False:
ax = self.axes[subplot_index]
else:
ax = self.axes[subplot_index].twinx()
ax.fill_between(xdata.values, data, **kwargs)
# Set X Format
if len(subplot_index) == 1:
days = self.xrng[subplot_index, 1] - self.xrng[subplot_index, 0]
else:
days = (
self.xrng[subplot_index[0], subplot_index[1], 1]
- self.xrng[subplot_index[0], subplot_index[1], 0]
)
myFmt = common.get_date_format(days)
ax.xaxis.set_major_formatter(myFmt)
# Set X format - We want the same time axes for all subplots
if not hasattr(self, 'time_fmt'):
self.time_fmt = myFmt
# Put on an xlabel, but only if we are making the bottom-most plot
if subplot_index[0] == self.axes.shape[0] - 1:
self.axes[subplot_index].set_xlabel('Time [UTC]')
# Set YTitle
ax.set_ylabel(ytitle)
# Set Title
if set_title is None:
set_title = ' '.join(
[
dsname,
field,
'on',
dt_utils.numpy_to_arm_date(self._obj[dsname].time.values[0]),
]
)
if secondary_y is False:
ax.set_title(set_title)
return self.axes[subplot_index]
<|code_end|>
examples/plot_multiple_column.py
<|code_start|><|code_end|>
|
act/plotting/timeseriesdisplay.py
<|code_start|>"""
Stores the class for TimeSeriesDisplay.
"""
import datetime as dt
import textwrap
import warnings
from copy import deepcopy
from re import search, search as re_search
import matplotlib as mpl
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from matplotlib import colors as mplcolors
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scipy.interpolate import NearestNDInterpolator
from ..qc.qcfilter import parse_bit
from ..utils import data_utils, datetime_utils as dt_utils
from ..utils.datetime_utils import determine_time_delta, reduce_time_ranges
from ..utils.geo_utils import get_sunrise_sunset_noon
from . import common
from .plot import Display
class TimeSeriesDisplay(Display):
"""
This subclass contains routines that are specific to plotting
time series plots from data. It is inherited from Display and therefore
contains all of Display's attributes and methods.
Examples
--------
To create a TimeSeriesDisplay with 3 rows, simply do:
.. code-block:: python
ds = act.read_netcdf(the_file)
disp = act.plotting.TimeSeriesDisplay(ds, subplot_shape=(3,), figsize=(15, 5))
The TimeSeriesDisplay constructor takes in the same keyword arguments as
plt.subplots. For more information on the plt.subplots keyword arguments,
see the `matplotlib documentation
<https://matplotlib.org/api/_as_gen/matplotlib.pyplot.subplots.html>`_.
If no subplot_shape is provided, then no figure or axis will be created
until add_subplots or plots is called.
"""
def __init__(self, obj, subplot_shape=(1,), ds_name=None, **kwargs):
super().__init__(obj, subplot_shape, ds_name, **kwargs)
def day_night_background(self, dsname=None, subplot_index=(0,)):
"""
Colorcodes the background according to sunrise/sunset.
Parameters
----------
dsname : None or str
If there is more than one datastream in the display object the
name of the datastream needs to be specified. If set to None and
there is only one datastream then ACT will use the sole datastream
in the object.
subplot_index : 1 or 2D tuple, list, or array
The index to the subplot to place the day and night background in.
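        Examples
        --------
        The background is normally requested through the day_night_background
        keyword of :func:`plot`, but it can also be added directly. The
        datastream and field names below are illustrative placeholders.
        .. code-block:: python
            display = act.plotting.TimeSeriesDisplay({'met': ds})
            display.plot('temp_mean')
            display.day_night_background(dsname='met', subplot_index=(0,))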
"""
if dsname is None and len(self._obj.keys()) > 1:
raise ValueError(
'You must choose a datastream to derive the '
+ 'information needed for the day and night '
+ 'background when 2 or more datasets are in '
+ 'the display object.'
)
elif dsname is None:
dsname = list(self._obj.keys())[0]
# Get File Dates
try:
file_dates = self._obj[dsname].attrs['_file_dates']
except KeyError:
file_dates = []
if len(file_dates) == 0:
sdate = dt_utils.numpy_to_arm_date(self._obj[dsname].time.values[0])
edate = dt_utils.numpy_to_arm_date(self._obj[dsname].time.values[-1])
file_dates = [sdate, edate]
all_dates = dt_utils.dates_between(file_dates[0], file_dates[-1])
if self.axes is None:
raise RuntimeError('day_night_background requires the plot to ' 'be displayed.')
ax = self.axes[subplot_index]
# Find variable names for latitude and longitude
variables = list(self._obj[dsname].data_vars)
lat_name = [var for var in ['lat', 'latitude'] if var in variables]
lon_name = [var for var in ['lon', 'longitude'] if var in variables]
if len(lat_name) == 0:
lat_name = None
else:
lat_name = lat_name[0]
if len(lon_name) == 0:
lon_name = None
else:
lon_name = lon_name[0]
# Variable name does not match, look for standard_name declaration
if lat_name is None or lon_name is None:
for var in variables:
try:
if self._obj[dsname][var].attrs['standard_name'] == 'latitude':
lat_name = var
except KeyError:
pass
try:
if self._obj[dsname][var].attrs['standard_name'] == 'longitude':
lon_name = var
except KeyError:
pass
if lat_name is not None and lon_name is not None:
break
if lat_name is None or lon_name is None:
return
try:
if self._obj[dsname][lat_name].data.size > 1:
                # Look for non-NaN values to use for location. If not found use first value.
lat = self._obj[dsname][lat_name].values
index = np.where(np.isfinite(lat))[0]
if index.size == 0:
index = [0]
lat = float(lat[index[0]])
                # Look for non-NaN values to use for location. If not found use first value.
lon = self._obj[dsname][lon_name].values
index = np.where(np.isfinite(lon))[0]
if index.size == 0:
index = [0]
lon = float(lon[index[0]])
else:
lat = float(self._obj[dsname][lat_name].values)
lon = float(self._obj[dsname][lon_name].values)
except AttributeError:
return
if not np.isfinite(lat):
warnings.warn(f"Latitude value in dataset of '{lat}' is not finite. ", RuntimeWarning)
return
if not np.isfinite(lon):
warnings.warn(f"Longitude value in dataset of '{lon}' is not finite. ", RuntimeWarning)
return
lat_range = [-90, 90]
if not (lat_range[0] <= lat <= lat_range[1]):
warnings.warn(
f"Latitude value in dataset of '{lat}' not within acceptable "
f'range of {lat_range[0]} <= latitude <= {lat_range[1]}. ',
RuntimeWarning,
)
return
lon_range = [-180, 180]
if not (lon_range[0] <= lon <= lon_range[1]):
warnings.warn(
f"Longitude value in dataset of '{lon}' not within acceptable "
f'range of {lon_range[0]} <= longitude <= {lon_range[1]}. ',
RuntimeWarning,
)
return
# initialize the plot to a gray background for total darkness
rect = ax.patch
rect.set_facecolor('0.85')
# Get date ranges to plot
plot_dates = []
for f in all_dates:
for ii in [-1, 0, 1]:
plot_dates.append(f + dt.timedelta(days=ii))
# Get sunrise, sunset and noon times
sunrise, sunset, noon = get_sunrise_sunset_noon(lat, lon, plot_dates)
# Plot daylight
for ii in range(0, len(sunrise)):
ax.axvspan(sunrise[ii], sunset[ii], facecolor='#FFFFCC', zorder=0)
# Plot noon line
for ii in noon:
ax.axvline(x=ii, linestyle='--', color='y', zorder=1)
def set_xrng(self, xrng, subplot_index=(0,)):
"""
Sets the x range of the plot.
Parameters
----------
xrng : 2 number array
The x limits of the plot.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
"""
if self.axes is None:
raise RuntimeError('set_xrng requires the plot to be displayed.')
self.axes[subplot_index].set_xlim(xrng)
# Make sure that the xrng value is a numpy array not pandas
if isinstance(xrng[0], pd.Timestamp):
xrng = [x.to_numpy() for x in xrng if isinstance(x, pd.Timestamp)]
if len(subplot_index) < 2:
self.xrng[subplot_index, 0] = xrng[0].astype('datetime64[D]').astype(float)
self.xrng[subplot_index, 1] = xrng[1].astype('datetime64[D]').astype(float)
else:
self.xrng[subplot_index][0] = xrng[0].astype('datetime64[D]').astype(float)
self.xrng[subplot_index][1] = xrng[1].astype('datetime64[D]').astype(float)
def set_yrng(self, yrng, subplot_index=(0,)):
"""
Sets the y range of the plot.
Parameters
----------
yrng : 2 number array
The y limits of the plot.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
"""
if self.axes is None:
raise RuntimeError('set_yrng requires the plot to be displayed.')
if not hasattr(self, 'yrng') and len(self.axes.shape) == 2:
self.yrng = np.zeros((self.axes.shape[0], self.axes.shape[1], 2))
elif not hasattr(self, 'yrng') and len(self.axes.shape) == 1:
self.yrng = np.zeros((self.axes.shape[0], 2))
if yrng[0] == yrng[1]:
yrng[1] = yrng[1] + 1
self.axes[subplot_index].set_ylim(yrng)
try:
self.yrng[subplot_index, :] = yrng
except IndexError:
self.yrng[subplot_index] = yrng
def plot(
self,
field,
dsname=None,
subplot_index=(0,),
cmap=None,
set_title=None,
add_nan=False,
day_night_background=False,
invert_y_axis=False,
abs_limits=(None, None),
time_rng=None,
y_rng=None,
use_var_for_y=None,
set_shading='auto',
assessment_overplot=False,
overplot_marker='.',
overplot_behind=False,
overplot_markersize=6,
assessment_overplot_category={
'Incorrect': ['Bad', 'Incorrect'],
'Suspect': ['Indeterminate', 'Suspect'],
},
assessment_overplot_category_color={'Incorrect': 'red', 'Suspect': 'orange'},
force_line_plot=False,
labels=False,
cbar_label=None,
cbar_h_adjust=None,
secondary_y=False,
y_axis_flag_meanings=False,
colorbar_labels=None,
**kwargs,
):
"""
Makes a timeseries plot. If subplots have not been added yet, an axis
will be created assuming that there is only going to be one plot.
Parameters
----------
field : str
The name of the field to plot.
dsname : None or str
If there is more than one datastream in the display object the
name of the datastream needs to be specified. If set to None and
there is only one datastream ACT will use the sole datastream
in the object.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
cmap : matplotlib colormap
The colormap to use.
set_title : str
The title for the plot.
add_nan : bool
Set to True to fill in data gaps with NaNs.
day_night_background : bool
Set to True to fill in a color coded background.
according to the time of day.
abs_limits : tuple or list
Sets the bounds on plot limits even if data values exceed
those limits. Set to (ymin,ymax). Use None if only setting
minimum or maximum limit, i.e. (22., None).
time_rng : tuple or list
List or tuple with (min, max) values to set the x-axis range
limits.
y_rng : tuple or list
List or tuple with (min, max) values to set the y-axis range
use_var_for_y : str
Set this to the name of a data variable in the Dataset to use as
the y-axis variable instead of the default dimension. Useful for
instances where data has an index-based dimension instead of a
height-based dimension. If shapes of arrays do not match it will
automatically revert back to the original ydata.
set_shading : string
            Option to set the matplotlib.pcolormesh shading parameter.
            Default is 'auto'.
assessment_overplot : boolean
Option to overplot quality control colored symbols over plotted
data using flag_assessment categories.
overplot_marker : str
Marker to use for overplot symbol.
overplot_behind : bool
Place the overplot marker behind the data point.
overplot_markersize : float or int
Size of overplot marker. If overplot_behind or force_line_plot
are set the marker size will be double overplot_markersize so
the color is visible.
assessment_overplot_category : dict
Lookup to categorize assessments into groups. This allows using
multiple terms for the same quality control level of failure.
Also allows adding more to the defaults.
assessment_overplot_category_color : dict
Lookup to match overplot category color to assessment grouping.
force_line_plot : boolean
Option to plot 2D data as 1D line plots.
labels : boolean or list
Option to overwrite the legend labels. Must have same dimensions as
number of lines plotted.
cbar_label : str
Option to overwrite default colorbar label.
cbar_h_adjust : float
            Option to adjust the horizontal location of the colorbar. Positive
            values move it to the right, negative values to the left.
secondary_y : boolean
Option to plot on secondary y axis.
y_axis_flag_meanings : boolean or int
            When set to True and plotting a state variable with flag_values and
            flag_meanings attributes, the numerical y-axis values are replaced
            with the flag_meanings text. Set to a positive number larger than 1
            to indicate the maximum word length to use; text longer than that
            value containing space characters is wrapped over multiple lines.
colorbar_labels : dict
A dictionary containing values for plotting a 2D array of state variables.
The dictionary uses data values as keys and a dictionary containing keys
'text' and 'color' for each data value to plot.
Example:
{0: {'text': 'Clear sky', 'color': 'white'},
1: {'text': 'Liquid', 'color': 'green'},
2: {'text': 'Ice', 'color': 'blue'},
3: {'text': 'Mixed phase', 'color': 'purple'}}
**kwargs : keyword arguments
The keyword arguments for :func:`plt.plot` (1D timeseries) or
:func:`plt.pcolormesh` (2D timeseries).
Returns
-------
ax : matplotlib axis handle
The matplotlib axis handle of the plot.
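        Examples
        --------
        A minimal usage sketch showing a 1D and a 2D field on separate
        subplots; the datastream and field names are illustrative placeholders.
        .. code-block:: python
            ds = act.io.armfiles.read_netcdf(ceil_file)
            display = act.plotting.TimeSeriesDisplay(
                {'ceil': ds}, subplot_shape=(2,), figsize=(10, 8))
            display.plot('first_cbh', subplot_index=(0,), day_night_background=True)
            display.plot('backscatter', subplot_index=(1,))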
"""
if dsname is None and len(self._obj.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._obj.keys())[0]
if y_axis_flag_meanings:
kwargs['linestyle'] = ''
# Get data and dimensions
data = self._obj[dsname][field]
dim = list(self._obj[dsname][field].dims)
xdata = self._obj[dsname][dim[0]]
if 'units' in data.attrs:
ytitle = ''.join(['(', data.attrs['units'], ')'])
else:
ytitle = field
if cbar_label is None:
cbar_default = ytitle
if len(dim) > 1:
if use_var_for_y is None:
ydata = self._obj[dsname][dim[1]]
else:
ydata = self._obj[dsname][use_var_for_y]
ydata_dim1 = self._obj[dsname][dim[1]]
if np.shape(ydata) != np.shape(ydata_dim1):
ydata = ydata_dim1
units = ytitle
if 'units' in ydata.attrs.keys():
units = ydata.attrs['units']
ytitle = ''.join(['(', units, ')'])
else:
units = ''
ytitle = dim[1]
# Create labels if 2d as 1d
if force_line_plot is True:
if labels is True:
labels = [' '.join([str(d), units]) for d in ydata.values]
ytitle = f"({data.attrs['units']})"
ydata = None
else:
ydata = None
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
# Set up secondary y axis if requested
if secondary_y is False:
ax = self.axes[subplot_index]
else:
ax = self.axes[subplot_index].twinx()
if colorbar_labels is not None:
flag_values = list(colorbar_labels.keys())
flag_meanings = [value['text'] for key, value in colorbar_labels.items()]
cbar_colors = [value['color'] for key, value in colorbar_labels.items()]
cmap = mpl.colors.ListedColormap(cbar_colors)
for ii, flag_meaning in enumerate(flag_meanings):
if len(flag_meaning) > 20:
flag_meaning = textwrap.fill(flag_meaning, width=20)
flag_meanings[ii] = flag_meaning
else:
flag_values = None
flag_meanings = None
cbar_colors = None
if ydata is None:
            # Add in NaNs so the line is not drawn across gaps in the data.
if add_nan is True:
xdata, data = data_utils.add_in_nan(xdata, data)
if day_night_background is True:
self.day_night_background(subplot_index=subplot_index, dsname=dsname)
# If limiting data being plotted use masked arrays
# Need to do it this way because of autoscale() method
if abs_limits[0] is not None and abs_limits[1] is not None:
data = np.ma.masked_outside(data, abs_limits[0], abs_limits[1])
elif abs_limits[0] is not None and abs_limits[1] is None:
data = np.ma.masked_less_equal(data, abs_limits[0])
elif abs_limits[0] is None and abs_limits[1] is not None:
data = np.ma.masked_greater_equal(data, abs_limits[1])
# Plot the data
if 'marker' not in kwargs.keys():
kwargs['marker'] = '.'
lines = ax.plot(xdata, data, **kwargs)
# Check if we need to call legend method after plotting. This is only
# called when no assessment overplot is called.
add_legend = False
if 'label' in kwargs.keys():
add_legend = True
# Overplot failing data if requested
if assessment_overplot:
                # If we are doing a forced line plot from 2D data we need to
                # manage the legend labels. Build lists to hold the labels and
                # lines of failing QC data because they are not set when the
                # labels keyword is not set.
if not isinstance(labels, list) and add_legend is False:
labels = []
lines = []
# For forced line plot need to plot QC behind point instead of
# on top of point.
zorder = None
if force_line_plot or overplot_behind:
zorder = 0
overplot_markersize *= 2.0
for assessment, categories in assessment_overplot_category.items():
flag_data = self._obj[dsname].qcfilter.get_masked_data(
field, rm_assessments=categories, return_inverse=True
)
if np.invert(flag_data.mask).any() and np.isfinite(flag_data).any():
try:
flag_data.mask = np.logical_or(data.mask, flag_data.mask)
except AttributeError:
pass
qc_ax = ax.plot(
xdata,
flag_data,
marker=overplot_marker,
linestyle='',
markersize=overplot_markersize,
color=assessment_overplot_category_color[assessment],
label=assessment,
zorder=zorder,
)
# If labels keyword is set need to add labels for calling legend
if isinstance(labels, list):
# If plotting forced_line_plot need to subset the Line2D object
# so we don't have more than one added to legend.
if len(qc_ax) > 1:
lines.extend(qc_ax[:1])
else:
lines.extend(qc_ax)
labels.append(assessment)
add_legend = True
# Add legend if labels are available
if isinstance(labels, list):
ax.legend(lines, labels)
elif add_legend:
ax.legend()
# Change y axis to text from flag_meanings if requested.
if y_axis_flag_meanings:
flag_meanings = self._obj[dsname][field].attrs['flag_meanings']
flag_values = self._obj[dsname][field].attrs['flag_values']
# If keyword is larger than 1 assume this is the maximum character length
# desired and insert returns to wrap text.
if y_axis_flag_meanings > 1:
for ii, flag_meaning in enumerate(flag_meanings):
if len(flag_meaning) > y_axis_flag_meanings:
flag_meaning = textwrap.fill(flag_meaning, width=y_axis_flag_meanings)
flag_meanings[ii] = flag_meaning
ax.set_yticks(flag_values)
ax.set_yticklabels(flag_meanings)
else:
# Add in nans to ensure the data are not streaking
if add_nan is True:
xdata, data = data_utils.add_in_nan(xdata, data)
            # Sets shading parameter to auto. Matplotlib will check dimensions.
            # If X, Y and C have the same dimensions shading is set to nearest.
            # If X and Y dimensions are 1 greater than C shading is set to flat.
if 'edgecolors' not in kwargs.keys():
kwargs['edgecolors'] = 'face'
mesh = ax.pcolormesh(
np.asarray(xdata),
ydata,
data.transpose(),
shading=set_shading,
cmap=cmap,
**kwargs,
)
# Set Title
if set_title is None:
if isinstance(self._obj[dsname].time.values[0], np.datetime64):
set_title = ' '.join(
[
dsname,
field,
'on',
dt_utils.numpy_to_arm_date(self._obj[dsname].time.values[0]),
]
)
else:
date_result = search(
r'\d{4}-\d{1,2}-\d{1,2}', self._obj[dsname].time.attrs['units']
)
if date_result is not None:
set_title = ' '.join([dsname, field, 'on', date_result.group(0)])
else:
set_title = ' '.join([dsname, field])
if secondary_y is False:
ax.set_title(set_title)
# Set YTitle
if not y_axis_flag_meanings:
ax.set_ylabel(ytitle)
# Set X Limit - We want the same time axes for all subplots
if not hasattr(self, 'time_rng'):
if time_rng is not None:
self.time_rng = list(time_rng)
else:
self.time_rng = [xdata.min().values, xdata.max().values]
self.set_xrng(self.time_rng, subplot_index)
# Set Y Limit
if y_rng is not None:
self.set_yrng(y_rng)
if hasattr(self, 'yrng'):
# Make sure that the yrng is not just the default
if ydata is None:
if abs_limits[0] is not None or abs_limits[1] is not None:
our_data = data
else:
our_data = data.values
else:
our_data = ydata
finite = np.isfinite(our_data)
if finite.any():
our_data = our_data[finite]
if invert_y_axis is False:
yrng = [np.min(our_data), np.max(our_data)]
else:
yrng = [np.max(our_data), np.min(our_data)]
else:
yrng = [0, 1]
            # Check if current range is outside of new range and only set
            # values that work for all data plotted.
if isinstance(yrng[0], np.datetime64):
yrng = mdates.datestr2num([str(yrng[0]), str(yrng[1])])
current_yrng = ax.get_ylim()
if yrng[0] > current_yrng[0]:
yrng[0] = current_yrng[0]
if yrng[1] < current_yrng[1]:
yrng[1] = current_yrng[1]
# Set y range the normal way if not secondary y
# If secondary, just use set_ylim
if secondary_y is False:
self.set_yrng(yrng, subplot_index)
else:
ax.set_ylim(yrng)
# Set X Format
if len(subplot_index) == 1:
days = self.xrng[subplot_index, 1] - self.xrng[subplot_index, 0]
else:
days = self.xrng[subplot_index][1] - self.xrng[subplot_index][0]
myFmt = common.get_date_format(days)
ax.xaxis.set_major_formatter(myFmt)
# Set X format - We want the same time axes for all subplots
if not hasattr(self, 'time_fmt'):
self.time_fmt = myFmt
# Put on an xlabel, but only if we are making the bottom-most plot
if subplot_index[0] == self.axes.shape[0] - 1:
ax.set_xlabel('Time [UTC]')
if ydata is not None:
if cbar_label is None:
cbar_title = cbar_default
else:
cbar_title = ''.join(['(', cbar_label, ')'])
if colorbar_labels is not None:
cbar_title = None
cbar = self.add_colorbar(
mesh,
title=cbar_title,
subplot_index=subplot_index,
values=flag_values,
pad=cbar_h_adjust,
)
cbar.set_ticks(flag_values)
cbar.set_ticklabels(flag_meanings)
cbar.ax.tick_params(labelsize=10)
else:
self.add_colorbar(
mesh, title=cbar_title, subplot_index=subplot_index, pad=cbar_h_adjust
)
return ax
def plot_barbs_from_spd_dir(
self, speed_field, direction_field, pres_field=None, dsname=None, **kwargs
):
"""
This procedure will make a wind barb plot timeseries.
If a pressure field is given and the wind fields are 1D, which, for
example, would occur if one wants to plot a timeseries of
rawinsonde data, then a time-height cross section of
winds will be made.
Note: This procedure calls plot_barbs_from_u_v and will take in the
same keyword arguments as that procedure.
Parameters
----------
speed_field : str
The name of the field specifying the wind speed in m/s.
direction_field : str
The name of the field specifying the wind direction in degrees.
0 degrees is defined to be north and increases clockwise like
what is used in standard meteorological notation.
pres_field : str
The name of the field specifying pressure or height. If using
height coordinates, then we recommend setting invert_y_axis
to False.
dsname : str
The name of the datastream to plot. Setting to None will make
ACT attempt to autodetect this.
kwargs : dict
Any additional keyword arguments will be passed into
            :func:`act.plotting.TimeSeriesDisplay.plot_barbs_from_u_v`.
Returns
-------
the_ax : matplotlib axis handle
The handle to the axis where the plot was made on.
Examples
--------
        .. code-block:: python
sonde_ds = act.io.armfiles.read_netcdf(
act.tests.sample_files.EXAMPLE_TWP_SONDE_WILDCARD)
BarbDisplay = act.plotting.TimeSeriesDisplay(
{'sonde_darwin': sonde_ds}, figsize=(10,5))
BarbDisplay.plot_barbs_from_spd_dir('deg', 'wspd', 'pres',
num_barbs_x=20)
"""
if dsname is None and len(self._obj.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._obj.keys())[0]
# Make temporary field called tempu, tempv
spd = self._obj[dsname][speed_field]
dir = self._obj[dsname][direction_field]
tempu = -np.sin(np.deg2rad(dir)) * spd
tempv = -np.cos(np.deg2rad(dir)) * spd
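        # Direction follows the meteorological convention (the direction the
        # wind blows from), e.g. dir = 180 deg (southerly) gives u = 0, v = +spd.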
self._obj[dsname]['temp_u'] = deepcopy(self._obj[dsname][speed_field])
self._obj[dsname]['temp_v'] = deepcopy(self._obj[dsname][speed_field])
self._obj[dsname]['temp_u'].values = tempu
self._obj[dsname]['temp_v'].values = tempv
the_ax = self.plot_barbs_from_u_v('temp_u', 'temp_v', pres_field, dsname, **kwargs)
del self._obj[dsname]['temp_u'], self._obj[dsname]['temp_v']
return the_ax
def plot_barbs_from_u_v(
self,
u_field,
v_field,
pres_field=None,
dsname=None,
subplot_index=(0,),
set_title=None,
day_night_background=False,
invert_y_axis=True,
num_barbs_x=20,
num_barbs_y=20,
use_var_for_y=None,
**kwargs,
):
"""
This function will plot a wind barb timeseries from u and v wind
data. If pres_field is given, a time-height series will be plotted
from 1-D wind data.
Parameters
----------
u_field : str
The name of the field containing the U component of the wind.
v_field : str
The name of the field containing the V component of the wind.
pres_field : str or None
The name of the field containing the pressure or height. Set
to None to not use this.
dsname : str or None
The name of the datastream to plot. Setting to None will make
ACT automatically try to determine this.
subplot_index : 2-tuple
The index of the subplot to make the plot on.
set_title : str or None
The title of the plot.
day_night_background : bool
Set to True to plot a day/night background.
invert_y_axis : bool
Set to True to invert the y axis (i.e. for plotting pressure as
the height coordinate).
num_barbs_x : int
The number of wind barbs to plot in the x axis.
num_barbs_y : int
The number of wind barbs to plot in the y axis.
cmap : matplotlib.colors.LinearSegmentedColormap
A color map to use with wind barbs. If this is set the plt.barbs
routine will be passed the C parameter scaled as sqrt of sum of the
squares and used with the passed in color map. A colorbar will also
be added. Setting the limits of the colorbar can be done with 'clim'.
Setting this changes the wind barbs from black to colors.
use_var_for_y : str
Set this to the name of a data variable in the Dataset to use as the
y-axis variable instead of the default dimension. Useful for instances
where data has an index-based dimension instead of a height-based
dimension. If shapes of arrays do not match it will automatically
revert back to the original ydata.
**kwargs : keyword arguments
Additional keyword arguments will be passed into plt.barbs.
Returns
-------
ax : matplotlib axis handle
The axis handle that contains the reference to the
constructed plot.
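        Examples
        --------
        A minimal usage sketch for 2D (time by height) wind fields; the
        datastream and field names below are illustrative placeholders.
        .. code-block:: python
            display = act.plotting.TimeSeriesDisplay({'radar_winds': ds}, figsize=(10, 5))
            display.plot_barbs_from_u_v('u_wind', 'v_wind',
                num_barbs_x=30, num_barbs_y=15)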
"""
if dsname is None and len(self._obj.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._obj.keys())[0]
# Get data and dimensions
u = self._obj[dsname][u_field].values
v = self._obj[dsname][v_field].values
dim = list(self._obj[dsname][u_field].dims)
xdata = self._obj[dsname][dim[0]].values
num_x = xdata.shape[-1]
barb_step_x = round(num_x / num_barbs_x)
if barb_step_x == 0:
barb_step_x = 1
if len(dim) > 1 and pres_field is None:
if use_var_for_y is None:
ydata = self._obj[dsname][dim[1]]
else:
ydata = self._obj[dsname][use_var_for_y]
ydata_dim1 = self._obj[dsname][dim[1]]
if np.shape(ydata) != np.shape(ydata_dim1):
ydata = ydata_dim1
if 'units' in ydata.attrs:
units = ydata.attrs['units']
else:
units = ''
ytitle = ''.join(['(', units, ')'])
num_y = ydata.shape[0]
barb_step_y = round(num_y / num_barbs_y)
if barb_step_y == 0:
barb_step_y = 1
xdata, ydata = np.meshgrid(xdata, ydata, indexing='ij')
elif pres_field is not None:
# What we will do here is do a nearest-neighbor interpolation
# for each member of the series. Coordinates are time, pressure
pres = self._obj[dsname][pres_field]
u_interp = NearestNDInterpolator((xdata, pres.values), u, rescale=True)
v_interp = NearestNDInterpolator((xdata, pres.values), v, rescale=True)
barb_step_x = 1
barb_step_y = 1
x_times = pd.date_range(xdata.min(), xdata.max(), periods=num_barbs_x)
if num_barbs_y == 1:
y_levels = pres.mean()
else:
y_levels = np.linspace(np.nanmin(pres), np.nanmax(pres), num_barbs_y)
xdata, ydata = np.meshgrid(x_times, y_levels, indexing='ij')
u = u_interp(xdata, ydata)
v = v_interp(xdata, ydata)
if 'units' in pres.attrs:
units = pres.attrs['units']
else:
units = ''
ytitle = ''.join(['(', units, ')'])
else:
ydata = None
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
if ydata is None:
ydata = np.ones(xdata.shape)
if 'cmap' in kwargs.keys():
map_color = np.sqrt(np.power(u[::barb_step_x], 2) + np.power(v[::barb_step_x], 2))
map_color[np.isnan(map_color)] = 0
ax = self.axes[subplot_index].barbs(
xdata[::barb_step_x],
ydata[::barb_step_x],
u[::barb_step_x],
v[::barb_step_x],
map_color,
**kwargs,
)
plt.colorbar(
ax,
ax=[self.axes[subplot_index]],
label='Wind Speed (' + self._obj[dsname][u_field].attrs['units'] + ')',
)
else:
self.axes[subplot_index].barbs(
xdata[::barb_step_x],
ydata[::barb_step_x],
u[::barb_step_x],
v[::barb_step_x],
**kwargs,
)
self.axes[subplot_index].set_yticks([])
else:
if 'cmap' in kwargs.keys():
map_color = np.sqrt(
np.power(u[::barb_step_x, ::barb_step_y], 2)
+ np.power(v[::barb_step_x, ::barb_step_y], 2)
)
map_color[np.isnan(map_color)] = 0
ax = self.axes[subplot_index].barbs(
xdata[::barb_step_x, ::barb_step_y],
ydata[::barb_step_x, ::barb_step_y],
u[::barb_step_x, ::barb_step_y],
v[::barb_step_x, ::barb_step_y],
map_color,
**kwargs,
)
plt.colorbar(
ax,
ax=[self.axes[subplot_index]],
label='Wind Speed (' + self._obj[dsname][u_field].attrs['units'] + ')',
)
else:
ax = self.axes[subplot_index].barbs(
xdata[::barb_step_x, ::barb_step_y],
ydata[::barb_step_x, ::barb_step_y],
u[::barb_step_x, ::barb_step_y],
v[::barb_step_x, ::barb_step_y],
**kwargs,
)
if day_night_background is True:
self.day_night_background(subplot_index=subplot_index, dsname=dsname)
# Set Title
if set_title is None:
set_title = ' '.join(
[
dsname,
'on',
dt_utils.numpy_to_arm_date(self._obj[dsname].time.values[0]),
]
)
self.axes[subplot_index].set_title(set_title)
# Set YTitle
if 'ytitle' in locals():
self.axes[subplot_index].set_ylabel(ytitle)
# Set X Limit - We want the same time axes for all subplots
time_rng = [xdata.min(), xdata.max()]
self.set_xrng(time_rng, subplot_index)
# Set Y Limit
if hasattr(self, 'yrng'):
# Make sure that the yrng is not just the default
if not np.all(self.yrng[subplot_index] == 0):
self.set_yrng(self.yrng[subplot_index], subplot_index)
else:
if ydata is None:
our_data = xdata
else:
our_data = ydata
if np.isfinite(our_data).any():
if invert_y_axis is False:
yrng = [np.nanmin(our_data), np.nanmax(our_data)]
else:
yrng = [np.nanmax(our_data), np.nanmin(our_data)]
else:
yrng = [0, 1]
self.set_yrng(yrng, subplot_index)
# Set X Format
if len(subplot_index) == 1:
days = self.xrng[subplot_index, 1] - self.xrng[subplot_index, 0]
else:
days = (
self.xrng[subplot_index[0], subplot_index[1], 1]
- self.xrng[subplot_index[0], subplot_index[1], 0]
)
# Put on an xlabel, but only if we are making the bottom-most plot
if subplot_index[0] == self.axes.shape[0] - 1:
self.axes[subplot_index].set_xlabel('Time [UTC]')
myFmt = common.get_date_format(days)
self.axes[subplot_index].xaxis.set_major_formatter(myFmt)
return self.axes[subplot_index]
def plot_time_height_xsection_from_1d_data(
self,
data_field,
pres_field,
dsname=None,
subplot_index=(0,),
set_title=None,
day_night_background=False,
num_time_periods=20,
num_y_levels=20,
invert_y_axis=True,
cbar_label=None,
set_shading='auto',
**kwargs,
):
"""
This will plot a time-height cross section from 1D datasets using
nearest neighbor interpolation on a regular time by height grid.
All that is needed are a data variable and a height variable.
Parameters
----------
data_field : str
The name of the field to plot.
pres_field : str
The name of the height or pressure field to plot.
dsname : str or None
The name of the datastream to plot
subplot_index : 2-tuple
The index of the subplot to create the plot on.
set_title : str or None
The title of the plot.
day_night_background : bool
Set to true to plot the day/night background.
num_time_periods : int
Set to determine how many time periods. Setting to None
will do one time period per day.
num_y_levels : int
The number of levels in the y axis to use.
invert_y_axis : bool
Set to true to invert the y-axis (recommended for
pressure coordinates).
cbar_label : str
Option to overwrite default colorbar label.
set_shading : string
            Option to set the matplotlib.pcolormesh shading parameter.
            Defaults to 'auto'.
**kwargs : keyword arguments
Additional keyword arguments will be passed
into :func:`plt.pcolormesh`
Returns
-------
ax : matplotlib axis handle
The matplotlib axis handle pointing to the plot.
"""
if dsname is None and len(self._obj.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2'
'or more datasets in the TimeSeriesDisplay'
'object.'
)
elif dsname is None:
dsname = list(self._obj.keys())[0]
dim = list(self._obj[dsname][data_field].dims)
if len(dim) > 1:
raise ValueError(
'plot_time_height_xsection_from_1d_data only '
'supports 1-D datasets. For datasets with 2 or '
'more dimensions use plot().'
)
# Get data and dimensions
data = self._obj[dsname][data_field].values
xdata = self._obj[dsname][dim[0]].values
# What we will do here is do a nearest-neighbor interpolation for each
# member of the series. Coordinates are time, pressure
pres = self._obj[dsname][pres_field]
u_interp = NearestNDInterpolator((xdata, pres.values), data, rescale=True)
# Mask points where we have no data
# Count number of unique days
x_times = pd.date_range(xdata.min(), xdata.max(), periods=num_time_periods)
y_levels = np.linspace(np.nanmin(pres), np.nanmax(pres), num_y_levels)
tdata, ydata = np.meshgrid(x_times, y_levels, indexing='ij')
data = u_interp(tdata, ydata)
ytitle = ''.join(['(', pres.attrs['units'], ')'])
units = data_field + ' (' + self._obj[dsname][data_field].attrs['units'] + ')'
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
mesh = self.axes[subplot_index].pcolormesh(
x_times, y_levels, np.transpose(data), shading=set_shading, **kwargs
)
if day_night_background is True:
self.day_night_background(subplot_index=subplot_index, dsname=dsname)
# Set Title
if set_title is None:
set_title = ' '.join(
[
dsname,
'on',
dt_utils.numpy_to_arm_date(self._obj[dsname].time.values[0]),
]
)
self.axes[subplot_index].set_title(set_title)
# Set YTitle
if 'ytitle' in locals():
self.axes[subplot_index].set_ylabel(ytitle)
# Set X Limit - We want the same time axes for all subplots
time_rng = [x_times[0], x_times[-1]]
self.set_xrng(time_rng, subplot_index)
# Set Y Limit
if hasattr(self, 'yrng'):
# Make sure that the yrng is not just the default
if not np.all(self.yrng[subplot_index] == 0):
self.set_yrng(self.yrng[subplot_index], subplot_index)
else:
if ydata is None:
our_data = data.values
else:
our_data = ydata
if np.isfinite(our_data).any():
if invert_y_axis is False:
yrng = [np.nanmin(our_data), np.nanmax(our_data)]
else:
yrng = [np.nanmax(our_data), np.nanmin(our_data)]
else:
yrng = [0, 1]
self.set_yrng(yrng, subplot_index)
# Set X Format
if len(subplot_index) == 1:
days = self.xrng[subplot_index, 1] - self.xrng[subplot_index, 0]
else:
days = (
self.xrng[subplot_index[0], subplot_index[1], 1]
- self.xrng[subplot_index[0], subplot_index[1], 0]
)
# Put on an xlabel, but only if we are making the bottom-most plot
if subplot_index[0] == self.axes.shape[0] - 1:
self.axes[subplot_index].set_xlabel('Time [UTC]')
if ydata is not None:
if cbar_label is None:
self.add_colorbar(mesh, title=units, subplot_index=subplot_index)
else:
self.add_colorbar(mesh, title=cbar_label, subplot_index=subplot_index)
myFmt = common.get_date_format(days)
self.axes[subplot_index].xaxis.set_major_formatter(myFmt)
return self.axes[subplot_index]
def time_height_scatter(
self,
data_field=None,
dsname=None,
cmap='rainbow',
alt_label=None,
alt_field='alt',
cb_label=None,
**kwargs,
):
"""
Create a time series plot of altitude and data variable with
color also indicating value with a color bar. The Color bar is
positioned to serve both as the indicator of the color intensity
and the second y-axis.
Parameters
----------
data_field : str
Name of data field in the object to plot on second y-axis.
height_field : str
Name of height field in the object to plot on first y-axis.
dsname : str or None
The name of the datastream to plot.
cmap : str
Colorbar color map to use.
alt_label : str
Altitude first y-axis label to use. If None, will try to use
long_name and units.
alt_field : str
Label for field in the object to plot on first y-axis.
cb_label : str
Colorbar label to use. If not set will try to use
long_name and units.
**kwargs : keyword arguments
Any other keyword arguments that will be passed
into TimeSeriesDisplay.plot module when the figure
is made.
"""
if dsname is None and len(self._obj.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._obj.keys())[0]
# Get data and dimensions
data = self._obj[dsname][data_field]
altitude = self._obj[dsname][alt_field]
dim = list(self._obj[dsname][data_field].dims)
xdata = self._obj[dsname][dim[0]]
if alt_label is None:
try:
alt_label = altitude.attrs['long_name'] + ''.join(
[' (', altitude.attrs['units'], ')']
)
except KeyError:
alt_label = alt_field
if cb_label is None:
try:
cb_label = data.attrs['long_name'] + ''.join([' (', data.attrs['units'], ')'])
except KeyError:
cb_label = data_field
colorbar_map = plt.cm.get_cmap(cmap)
self.fig.subplots_adjust(left=0.1, right=0.86, bottom=0.16, top=0.91)
ax1 = self.plot(alt_field, color='black', **kwargs)
ax1.set_ylabel(alt_label)
ax2 = ax1.twinx()
sc = ax2.scatter(xdata.values, data.values, c=data.values, marker='.', cmap=colorbar_map)
cbaxes = self.fig.add_axes(
[
self.fig.subplotpars.right + 0.02,
self.fig.subplotpars.bottom,
0.02,
self.fig.subplotpars.top - self.fig.subplotpars.bottom,
]
)
cbar = plt.colorbar(sc, cax=cbaxes)
ax2.set_ylim(cbar.mappable.get_clim())
cbar.ax.set_ylabel(cb_label)
ax2.set_yticklabels([])
return self.axes[0]
def qc_flag_block_plot(
self,
data_field=None,
dsname=None,
subplot_index=(0,),
time_rng=None,
assessment_color=None,
edgecolor='face',
set_shading='auto',
**kwargs,
):
"""
Create a time series plot of embedded quality control values
using broken barh plotting.
Parameters
----------
data_field : str
Name of data field in the object to plot corresponding quality
control.
dsname : None or str
If there is more than one datastream in the display object the
name of the datastream needs to be specified. If set to None and
there is only one datastream ACT will use the sole datastream
in the object.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
time_rng : tuple or list
List or tuple with (min, max) values to set the x-axis range limits.
assessment_color : dict
            Dictionary lookup to override the default assessment-to-color mapping. Make sure
            the assessment word is set with the correct case.
edgecolor : str or list
Color name, list of color names or 'face' as defined in matplotlib.axes.Axes.broken_barh
set_shading : string
            Option to set the matplotlib.pcolormesh shading parameter.
            Defaults to 'auto'.
**kwargs : keyword arguments
The keyword arguments for :func:`plt.broken_barh`.
"""
# Color to plot associated with assessment.
color_lookup = {
'Bad': 'red',
'Incorrect': 'red',
'Indeterminate': 'orange',
'Suspect': 'orange',
'Missing': 'darkgray',
'Not Failing': 'green',
'Acceptable': 'green',
}
if assessment_color is not None:
for asses, color in assessment_color.items():
color_lookup[asses] = color
if asses == 'Incorrect':
color_lookup['Bad'] = color
if asses == 'Suspect':
color_lookup['Indeterminate'] = color
# Set up list of test names to use for missing values
missing_val_long_names = [
'Value equal to missing_value*',
'Value set to missing_value*',
'Value is equal to missing_value*',
'Value is set to missing_value*',
]
if dsname is None and len(self._obj.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._obj.keys())[0]
# Set up or get current plot figure
if self.fig is None:
self.fig = plt.figure()
# Set up or get current axes
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
ax = self.axes[subplot_index]
# Set X Limit - We want the same time axes for all subplots
data = self._obj[dsname][data_field]
dim = list(self._obj[dsname][data_field].dims)
xdata = self._obj[dsname][dim[0]]
# Get data and attributes
qc_data_field = self._obj[dsname].qcfilter.check_for_ancillary_qc(
data_field, add_if_missing=False, cleanup=False
)
if qc_data_field is None:
raise ValueError(f'No quality control ancillary variable in Dataset for {data_field}')
flag_masks = self._obj[dsname][qc_data_field].attrs['flag_masks']
flag_meanings = self._obj[dsname][qc_data_field].attrs['flag_meanings']
flag_assessments = self._obj[dsname][qc_data_field].attrs['flag_assessments']
# Get time ranges for green blocks
time_delta = determine_time_delta(xdata.values)
barh_list_green = reduce_time_ranges(xdata.values, time_delta=time_delta, broken_barh=True)
# Set background to gray indicating not available data
ax.set_facecolor('dimgray')
# Check if plotting 2D data vs 1D data. 2D data will be summarized by
# assessment category instead of showing each test.
data_shape = self._obj[dsname][qc_data_field].shape
if len(data_shape) > 1:
cur_assessments = list(set(flag_assessments))
cur_assessments.sort()
cur_assessments.reverse()
qc_data = np.full(data_shape, -1, dtype=np.int16)
plot_colors = []
tick_names = []
index = self._obj[dsname][qc_data_field].values == 0
if index.any():
qc_data[index] = 0
plot_colors.append(color_lookup['Not Failing'])
tick_names.append('Not Failing')
for ii, assess in enumerate(cur_assessments):
if assess not in color_lookup:
color_lookup[assess] = list(mplcolors.CSS4_COLORS.keys())[ii]
ii += 1
assess_data = self._obj[dsname].qcfilter.get_masked_data(
data_field, rm_assessments=assess
)
if assess_data.mask.any():
qc_data[assess_data.mask] = ii
plot_colors.append(color_lookup[assess])
tick_names.append(assess)
# Overwrite missing data. Not sure if we want to do this because VAPs set
# the value to missing but the test is set to Bad. This tries to overcome that
# by looking for correct test description that would only indicate the values
# are missing not that they are set to missing by a test... most likely.
missing_test_nums = []
for ii, flag_meaning in enumerate(flag_meanings):
# Check if the bit set is indicating missing data.
for val in missing_val_long_names:
if re_search(val, flag_meaning):
test_num = parse_bit(flag_masks[ii])[0]
missing_test_nums.append(test_num)
assess_data = self._obj[dsname].qcfilter.get_masked_data(
data_field, rm_tests=missing_test_nums
)
if assess_data.mask.any():
qc_data[assess_data.mask] = -1
plot_colors.append(color_lookup['Missing'])
tick_names.append('Missing')
# Create a masked array to allow not plotting where values are missing
qc_data = np.ma.masked_equal(qc_data, -1)
dims = self._obj[dsname][qc_data_field].dims
xvalues = self._obj[dsname][dims[0]].values
yvalues = self._obj[dsname][dims[1]].values
cMap = mplcolors.ListedColormap(plot_colors)
mesh = ax.pcolormesh(
xvalues,
yvalues,
np.transpose(qc_data),
cmap=cMap,
vmin=0,
shading=set_shading,
)
divider = make_axes_locatable(ax)
# Determine correct placement of words on colorbar
tick_nums = (
np.arange(0, len(tick_names) * 2 + 1) / (len(tick_names) * 2) * np.nanmax(qc_data)
)[1::2]
cax = divider.append_axes('bottom', size='5%', pad=0.3)
cbar = self.fig.colorbar(
mesh,
cax=cax,
orientation='horizontal',
spacing='uniform',
ticks=tick_nums,
shrink=0.5,
)
cbar.ax.set_xticklabels(tick_names)
# Set YTitle
dim_name = list(set(self._obj[dsname][qc_data_field].dims) - {'time'})
try:
ytitle = f"{dim_name[0]} ({self._obj[dsname][dim_name[0]].attrs['units']})"
ax.set_ylabel(ytitle)
except KeyError:
pass
# Add which tests were set as text to the plot
unique_values = []
for ii in np.unique(self._obj[dsname][qc_data_field].values):
unique_values.extend(parse_bit(ii))
if len(unique_values) > 0:
unique_values = list(set(unique_values))
unique_values.sort()
unique_values = [str(ii) for ii in unique_values]
self.fig.text(
0.5,
-0.35,
f"QC Tests Tripped: {', '.join(unique_values)}",
transform=ax.transAxes,
horizontalalignment='center',
verticalalignment='center',
fontweight='bold',
)
else:
test_nums = []
for ii, assess in enumerate(flag_assessments):
if assess not in color_lookup:
color_lookup[assess] = list(mplcolors.CSS4_COLORS.keys())[ii]
# Plot green data first.
ax.broken_barh(
barh_list_green,
(ii, ii + 1),
facecolors=color_lookup['Not Failing'],
edgecolor=edgecolor,
**kwargs,
)
# Get test number from flag_mask bitpacked number
test_nums.append(parse_bit(flag_masks[ii]))
# Get masked array data to use mask for finding if/where test is set
data = self._obj[dsname].qcfilter.get_masked_data(
data_field, rm_tests=test_nums[-1]
)
if np.any(data.mask):
# Get time ranges from time and masked data
barh_list = reduce_time_ranges(
xdata.values[data.mask], time_delta=time_delta, broken_barh=True
)
# Check if the bit set is indicating missing data. If so change
# to different plotting color than what is in flag_assessments.
for val in missing_val_long_names:
if re_search(val, flag_meanings[ii]):
assess = 'Missing'
break
# Lay down blocks of tripped tests using correct color
ax.broken_barh(
barh_list,
(ii, ii + 1),
facecolors=color_lookup[assess],
edgecolor=edgecolor,
**kwargs,
)
# Add test description to plot.
ax.text(xdata.values[0], ii + 0.5, ' ' + flag_meanings[ii], va='center')
# Change y ticks to test number
plt.yticks(
[ii + 0.5 for ii in range(0, len(test_nums))],
labels=['Test ' + str(ii[0]) for ii in test_nums],
)
# Set ylimit to number of tests plotted
ax.set_ylim(0, len(flag_assessments))
# Set X Limit - We want the same time axes for all subplots
if not hasattr(self, 'time_rng'):
if time_rng is not None:
self.time_rng = list(time_rng)
else:
self.time_rng = [xdata.min().values, xdata.max().values]
self.set_xrng(self.time_rng, subplot_index)
# Get X format - We want the same time axes for all subplots
if hasattr(self, 'time_fmt'):
ax.xaxis.set_major_formatter(self.time_fmt)
else:
# Set X Format
if len(subplot_index) == 1:
days = self.xrng[subplot_index, 1] - self.xrng[subplot_index, 0]
else:
days = (
self.xrng[subplot_index[0], subplot_index[1], 1]
- self.xrng[subplot_index[0], subplot_index[1], 0]
)
myFmt = common.get_date_format(days)
ax.xaxis.set_major_formatter(myFmt)
self.time_fmt = myFmt
return self.axes[subplot_index]
def fill_between(
self,
field,
dsname=None,
subplot_index=(0,),
set_title=None,
secondary_y=False,
**kwargs,
):
"""
Makes a fill_between plot, based on matplotlib
Parameters
----------
field : str
The name of the field to plot.
dsname : None or str
If there is more than one datastream in the display object the
name of the datastream needs to be specified. If set to None and
there is only one datastream ACT will use the sole datastream
in the object.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
set_title : str
The title for the plot.
secondary_y : boolean
Option to indicate if the data should be plotted on second y-axis.
**kwargs : keyword arguments
The keyword arguments for :func:`plt.plot` (1D timeseries) or
:func:`plt.pcolormesh` (2D timeseries).
Returns
-------
ax : matplotlib axis handle
The matplotlib axis handle of the plot.
"""
if dsname is None and len(self._obj.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._obj.keys())[0]
# Get data and dimensions
data = self._obj[dsname][field]
dim = list(self._obj[dsname][field].dims)
xdata = self._obj[dsname][dim[0]]
if 'units' in data.attrs:
ytitle = ''.join(['(', data.attrs['units'], ')'])
else:
ytitle = field
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
# Set ax to appropriate axis
if secondary_y is False:
ax = self.axes[subplot_index]
else:
ax = self.axes[subplot_index].twinx()
ax.fill_between(xdata.values, data, **kwargs)
# Set X Format
if len(subplot_index) == 1:
days = self.xrng[subplot_index, 1] - self.xrng[subplot_index, 0]
else:
days = (
self.xrng[subplot_index[0], subplot_index[1], 1]
- self.xrng[subplot_index[0], subplot_index[1], 0]
)
myFmt = common.get_date_format(days)
ax.xaxis.set_major_formatter(myFmt)
# Set X format - We want the same time axes for all subplots
if not hasattr(self, 'time_fmt'):
self.time_fmt = myFmt
# Put on an xlabel, but only if we are making the bottom-most plot
if subplot_index[0] == self.axes.shape[0] - 1:
self.axes[subplot_index].set_xlabel('Time [UTC]')
# Set YTitle
ax.set_ylabel(ytitle)
# Set Title
if set_title is None:
set_title = ' '.join(
[
dsname,
field,
'on',
dt_utils.numpy_to_arm_date(self._obj[dsname].time.values[0]),
]
)
if secondary_y is False:
ax.set_title(set_title)
return self.axes[subplot_index]
<|code_end|>
examples/plot_multiple_column.py
<|code_start|>"""
Plot multiple columns of timeseries data
----------------------------------------------------
This is a simple example for how to plot multiple columns
in a TimeseriesDisplay.
Author: Maxwell Grover
"""
from matplotlib import pyplot as plt
import act
files = act.tests.sample_files.EXAMPLE_MET_WILDCARD
met = act.io.armfiles.read_netcdf(files)
# Plot data
display = act.plotting.TimeSeriesDisplay(met)
display.add_subplots((3, 2), figsize=(15, 10))
display.plot('temp_mean', color='tab:red', subplot_index=(0, 0))
display.plot('rh_mean', color='tab:green', subplot_index=(1, 0))
display.plot('wdir_vec_mean', subplot_index=(2, 0))
display.plot('temp_std', color='tab:red', subplot_index=(0, 1))
display.plot('rh_std', color='tab:green', subplot_index=(1, 1))
display.plot('wdir_vec_std', subplot_index=(2, 1))
plt.show()
<|code_end|>
|
ARM Data API Returns Text when File not Available
I tried to download gucceilpblhtM1.a0 but it's not available through the webservice due to it being an a0-level file. Instead the API returned text that was included in the file. We should put in a check to ensure that these cases are caught and files are not produced.
```
This data file is not available on /data/archive. To download this file, please an order via Data Discovery. https://adc.arm.gov/discovery
```
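One possible guard, sketched below, is to check the response body for the "not available" notice before anything is written to disk. The helper name `fetch_if_available` and its exact error text are illustrative only, not part of the ACT API:
```
# Illustrative sketch only -- not ACT code. Inspect the webservice response
# body for ARM's "not available" notice before saving it as a data file.
from urllib.request import urlopen


def fetch_if_available(url, output_file):
    body = urlopen(url).read()
    if b'this data file is not available' in body.lower():
        # The body is an error notice, not data; refuse to write a bogus file.
        raise OSError('File is not on /data/archive; order it via Data Discovery.')
    with open(output_file, 'wb') as f:
        f.write(body)
    return output_file
```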
|
act/discovery/get_armfiles.py
<|code_start|>"""
Script for downloading data from ARM's Live Data Webservice
"""
import argparse
import json
import os
import sys
from datetime import timedelta
try:
from urllib.request import urlopen
except ImportError:
from urllib import urlopen
from act.utils import date_parser
def download_data(username, token, datastream, startdate, enddate, time=None, output=None):
"""
This tool will help users utilize the ARM Live Data Webservice to download
ARM data.
Parameters
----------
username : str
The username to use for logging into the ADC archive.
token : str
The access token for accessing the ADC archive.
datastream : str
The name of the datastream to acquire.
startdate : str
The start date of the data to acquire. Formats accepted are
YYYY-MM-DD, DD.MM.YYYY, DD/MM/YYYY, YYYYMMDD, YYYY/MM/DD or
any of the previous formats with THH:MM:SS added onto the end
(ex. 2020-09-15T12:00:00).
enddate : str
The end date of the data to acquire. Formats accepted are
YYYY-MM-DD, DD.MM.YYYY, DD/MM/YYYY, YYYYMMDD or YYYY/MM/DD, or
any of the previous formats with THH:MM:SS added onto the end
(ex. 2020-09-15T13:00:00).
time: str or None
The specific time. Format is HHMMSS. Set to None to download all files
in the given date interval.
output : str
The output directory for the data. Set to None to make a folder in the
current working directory with the same name as *datastream* to place
the files in.
Returns
-------
files : list
Returns list of files retrieved
Notes
-----
This programmatic interface allows users to query and automate
machine-to-machine downloads of ARM data. This tool uses a REST URL and
specific parameters (saveData, query), user ID and access token, a
datastream name, a start date, and an end date, and data files matching
the criteria will be returned to the user and downloaded.
By using this web service, users can setup cron jobs and automatically
download data from /data/archive into their workspace. This will also
eliminate the manual step of following a link in an email to download data.
All other data files, which are not on the spinning
disk (on HPSS), will have to go through the regular ordering process.
More information about this REST API and tools can be found on `ARM Live
<https://adc.arm.gov/armlive/#scripts>`_.
To login/register for an access token click `here
<https://adc.arm.gov/armlive/livedata/home>`_.
Author: Michael Giansiracusa
Email: [email protected]
Web Tools Contact: Ranjeet Devarakonda [email protected]
Examples
--------
This code will download the netCDF files from the sgpmetE13.b1 datastream
and place them in a directory named sgpmetE13.b1. The data from 14 Jan to
20 Jan 2017 will be downloaded. Replace *userName* and *XXXXXXXXXXXXXXXX*
with your username and token for ARM Data Discovery. See the Notes for
information on how to obtain a username and token.
.. code-block:: python
act.discovery.download_data(
"userName", "XXXXXXXXXXXXXXXX", "sgpmetE13.b1", "2017-01-14", "2017-01-20"
)
"""
# default start and end are empty
start, end = '', ''
# start and end strings for query_url are constructed
# if the arguments were provided
if startdate:
start_datetime = date_parser(startdate, return_datetime=True)
start = start_datetime.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'
start = f'&start={start}'
if enddate:
end_datetime = date_parser(enddate, return_datetime=True)
        # If the start and end date are the same, add a day to the end date
if start_datetime == end_datetime:
end_datetime += timedelta(hours=23, minutes=59, seconds=59)
end = end_datetime.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'
end = f'&end={end}'
# build the url to query the web service using the arguments provided
query_url = (
'https://adc.arm.gov/armlive/livedata/query?' + 'user={0}&ds={1}{2}{3}&wt=json'
).format(':'.join([username, token]), datastream, start, end)
# get url response, read the body of the message,
# and decode from bytes type to utf-8 string
response_body = urlopen(query_url).read().decode('utf-8')
# if the response is an html doc, then there was an error with the user
if response_body[1:14] == '!DOCTYPE html':
raise ConnectionRefusedError('Error with user. Check username or token.')
# parse into json object
response_body_json = json.loads(response_body)
# construct output directory
if output:
# output files to directory specified
output_dir = os.path.join(output)
else:
# if no folder given, add datastream folder
# to current working dir to prevent file mix-up
output_dir = os.path.join(os.getcwd(), datastream)
# not testing, response is successful and files were returned
if response_body_json is None:
print('ARM Data Live Webservice does not appear to be functioning')
return []
num_files = len(response_body_json['files'])
file_names = []
if response_body_json['status'] == 'success' and num_files > 0:
for fname in response_body_json['files']:
if time is not None:
if time not in fname:
continue
print(f'[DOWNLOADING] {fname}')
# construct link to web service saveData function
save_data_url = (
'https://adc.arm.gov/armlive/livedata/' + 'saveData?user={0}&file={1}'
).format(':'.join([username, token]), fname)
output_file = os.path.join(output_dir, fname)
# make directory if it doesn't exist
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
# create file and write bytes to file
with open(output_file, 'wb') as open_bytes_file:
open_bytes_file.write(urlopen(save_data_url).read())
file_names.append(output_file)
else:
print(
'No files returned or url status error.\n' 'Check datastream name, start, and end date.'
)
return file_names
<|code_end|>
|
act/discovery/get_armfiles.py
<|code_start|>"""
Script for downloading data from ARM's Live Data Webservice
"""
import argparse
import contextlib
import json
import os
import sys
from datetime import timedelta
try:
from urllib.request import urlopen
except ImportError:
from urllib import urlopen
from act.utils import date_parser
def download_data(username, token, datastream, startdate, enddate, time=None, output=None):
"""
This tool will help users utilize the ARM Live Data Webservice to download
ARM data.
Parameters
----------
username : str
The username to use for logging into the ADC archive.
token : str
The access token for accessing the ADC archive.
datastream : str
The name of the datastream to acquire.
startdate : str
The start date of the data to acquire. Formats accepted are
YYYY-MM-DD, DD.MM.YYYY, DD/MM/YYYY, YYYYMMDD, YYYY/MM/DD or
any of the previous formats with THH:MM:SS added onto the end
(ex. 2020-09-15T12:00:00).
enddate : str
The end date of the data to acquire. Formats accepted are
YYYY-MM-DD, DD.MM.YYYY, DD/MM/YYYY, YYYYMMDD or YYYY/MM/DD, or
any of the previous formats with THH:MM:SS added onto the end
(ex. 2020-09-15T13:00:00).
time: str or None
The specific time. Format is HHMMSS. Set to None to download all files
in the given date interval.
output : str
The output directory for the data. Set to None to make a folder in the
current working directory with the same name as *datastream* to place
the files in.
Returns
-------
files : list
Returns list of files retrieved
Notes
-----
This programmatic interface allows users to query and automate
machine-to-machine downloads of ARM data. This tool uses a REST URL and
specific parameters (saveData, query), user ID and access token, a
datastream name, a start date, and an end date, and data files matching
the criteria will be returned to the user and downloaded.
By using this web service, users can setup cron jobs and automatically
download data from /data/archive into their workspace. This will also
eliminate the manual step of following a link in an email to download data.
All other data files, which are not on the spinning
disk (on HPSS), will have to go through the regular ordering process.
More information about this REST API and tools can be found on `ARM Live
<https://adc.arm.gov/armlive/#scripts>`_.
To login/register for an access token click `here
<https://adc.arm.gov/armlive/livedata/home>`_.
Author: Michael Giansiracusa
Email: [email protected]
Web Tools Contact: Ranjeet Devarakonda [email protected]
Examples
--------
This code will download the netCDF files from the sgpmetE13.b1 datastream
and place them in a directory named sgpmetE13.b1. The data from 14 Jan to
20 Jan 2017 will be downloaded. Replace *userName* and *XXXXXXXXXXXXXXXX*
with your username and token for ARM Data Discovery. See the Notes for
information on how to obtain a username and token.
.. code-block:: python
act.discovery.download_data(
"userName", "XXXXXXXXXXXXXXXX", "sgpmetE13.b1", "2017-01-14", "2017-01-20"
)
"""
# default start and end are empty
start, end = '', ''
# start and end strings for query_url are constructed
# if the arguments were provided
if startdate:
start_datetime = date_parser(startdate, return_datetime=True)
start = start_datetime.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'
start = f'&start={start}'
if enddate:
end_datetime = date_parser(enddate, return_datetime=True)
        # If the start and end date are the same, add a day to the end date
if start_datetime == end_datetime:
end_datetime += timedelta(hours=23, minutes=59, seconds=59)
end = end_datetime.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'
end = f'&end={end}'
# build the url to query the web service using the arguments provided
query_url = (
'https://adc.arm.gov/armlive/livedata/query?' + 'user={0}&ds={1}{2}{3}&wt=json'
).format(':'.join([username, token]), datastream, start, end)
# get url response, read the body of the message,
# and decode from bytes type to utf-8 string
response_body = urlopen(query_url).read().decode('utf-8')
# if the response is an html doc, then there was an error with the user
if response_body[1:14] == '!DOCTYPE html':
raise ConnectionRefusedError('Error with user. Check username or token.')
# parse into json object
response_body_json = json.loads(response_body)
# construct output directory
if output:
# output files to directory specified
output_dir = os.path.join(output)
else:
# if no folder given, add datastream folder
# to current working dir to prevent file mix-up
output_dir = os.path.join(os.getcwd(), datastream)
# not testing, response is successful and files were returned
if response_body_json is None:
print('ARM Data Live Webservice does not appear to be functioning')
return []
num_files = len(response_body_json['files'])
file_names = []
if response_body_json['status'] == 'success' and num_files > 0:
for fname in response_body_json['files']:
if time is not None:
if time not in fname:
continue
# construct link to web service saveData function
save_data_url = (
'https://adc.arm.gov/armlive/livedata/' + 'saveData?user={0}&file={1}'
).format(':'.join([username, token]), fname)
output_file = os.path.join(output_dir, fname)
            # if datastream is not in data archive, return error
            with contextlib.closing(urlopen(save_data_url)) as response:
                check = response.read().decode().lower()
            if 'this data file is not available' in check:
                raise OSError(
                    "The datastream '" + datastream
                    + "' is not available on /data/archive. To download "
                    + "this file, place an order via Data Discovery. "
                    + "https://adc.arm.gov/discovery")
# make directory if it doesn't exist
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
# create file and write bytes to file
with open(output_file, 'wb') as open_bytes_file:
print(f'[DOWNLOADING] {fname}')
open_bytes_file.write(urlopen(save_data_url).read())
file_names.append(output_file)
else:
print(
'No files returned or url status error.\n' 'Check datastream name, start, and end date.'
)
return file_names
<|code_end|>
|
Accessor not available in dataset
I fetched the latest updates after the lazy_loading PR and ran pytest and am seeing a lot of errors with accessors not loading. Clean, QCFilter, and QCTests are no longer available in the datasets for some reason.
FAILED test_io.py::test_io_mfdataset - AttributeError: 'Dataset' object has no attribute 'clean'
FAILED test_io.py::test_io_write - AttributeError: 'Dataset' object has no attribute 'clean'
FAILED test_io.py::test_clean_cf_qc - AttributeError: 'Dataset' object has no attribute 'clean'
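For context, xarray accessors such as clean, qcfilter, and qctests only exist on Dataset objects after the module that registers them has been imported, which is exactly the step a fully lazy package import can skip. A minimal sketch of that mechanism (the `demo` accessor below is illustrative, not ACT's):
```
# Minimal illustration of xarray accessor registration -- not ACT code.
import xarray as xr


@xr.register_dataset_accessor('demo')
class _DemoAccessor:
    def __init__(self, ds):
        self._ds = ds

    def cleanup(self):
        return 'accessor is registered'


# Works only because the decorator above ran at import time; if this module
# were never imported, Dataset objects would have no 'demo' attribute.
print(xr.Dataset().demo.cleanup())
```
Eagerly importing the modules that define the accessors, as the change below does, makes them available again.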
|
act/__init__.py
<|code_start|>"""
ACT: The Atmospheric Community Toolkit
======================================
"""
import lazy_loader as lazy
# No more pandas warnings
from pandas.plotting import register_matplotlib_converters
from . import tests
from ._version import get_versions
register_matplotlib_converters()
# Import the lazy loaded modules
submodules = [
'corrections',
'discovery',
'io',
'qc',
'utils',
'retrievals',
'plotting',
]
__getattr__, __dir__, _ = lazy.attach(__name__, submodules)
# Version for source builds
vdict = get_versions()
__version__ = vdict['version']
<|code_end|>
act/qc/__init__.py
<|code_start|>"""
This module contains procedures for working with QC information
and for applying tests to data.
"""
import lazy_loader as lazy
# We need to import clean first to register the accessor
from .clean import *
__getattr__, __dir__, __all__ = lazy.attach(
__name__,
submodules=[
'add_supplemental_qc',
'arm',
'bsrn_tests',
'comparison_tests',
'qcfilter',
'qctests',
'radiometer_tests',
'sp2',
],
submod_attrs={
'arm': ['add_dqr_to_qc'],
'qcfilter': ['QCFilter'],
'qctests': ['QCTests'],
'radiometer_tests': ['fft_shading_test'],
'bsrn_tests': ['QCTests'],
'comparison_tests': ['QCTests'],
'add_supplemental_qc': ['read_yaml_supplemental_qc'],
'sp2': ['SP2ParticleCriteria', 'get_waveform_statistics'],
},
)
<|code_end|>
|
act/__init__.py
<|code_start|>"""
ACT: The Atmospheric Community Toolkit
======================================
"""
import lazy_loader as lazy
# No more pandas warnings
from pandas.plotting import register_matplotlib_converters
from . import tests
from ._version import get_versions
from .qc import QCFilter, QCTests, clean
register_matplotlib_converters()
# Import the lazy loaded modules
submodules = [
'corrections',
'discovery',
'io',
'qc',
'utils',
'retrievals',
'plotting',
]
__getattr__, __dir__, _ = lazy.attach(__name__, submodules)
# Version for source builds
vdict = get_versions()
__version__ = vdict['version']
<|code_end|>
act/qc/__init__.py
<|code_start|>"""
This module contains procedures for working with QC information
and for applying tests to data.
"""
import lazy_loader as lazy
# We need to import clean first to register the accessor
from .clean import *
from .qcfilter import QCFilter
from .qctests import QCTests
__getattr__, __dir__, __all__ = lazy.attach(
__name__,
submodules=[
'add_supplemental_qc',
'arm',
'bsrn_tests',
'comparison_tests',
'qcfilter',
'qctests',
'radiometer_tests',
'sp2',
],
submod_attrs={
'arm': ['add_dqr_to_qc'],
'qcfilter': ['QCFilter'],
'qctests': ['QCTests'],
'radiometer_tests': ['fft_shading_test'],
'bsrn_tests': ['QCTests'],
'comparison_tests': ['QCTests'],
'add_supplemental_qc': ['read_yaml_supplemental_qc'],
'sp2': ['SP2ParticleCriteria', 'get_waveform_statistics'],
},
)
<|code_end|>
|
ICARTT formatting error when using cleanup
When I read in an ICARTT file and then try and run obj.clean.cleanup() to get it to CF standards, it throws the following error. We should look at making the object compliant with what's needed for this cleanup.
```
Traceback (most recent call last):
File "/Users/atheisen/opt/anaconda3/lib/python3.8/site-packages/xarray/core/dataset.py", line 1395, in _construct_dataarray
variable = self._variables[name]
KeyError: 'flag'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "plot_icartt.py", line 22, in <module>
ds.clean.cleanup()
File "/Users/atheisen/Code/ACT/act/qc/clean.py", line 151, in cleanup
self._obj.clean.link_variables()
File "/Users/atheisen/Code/ACT/act/qc/clean.py", line 583, in link_variables
self._obj[variable].attrs['ancillary_variables'] = copy.copy(ancillary_variables)
File "/Users/atheisen/opt/anaconda3/lib/python3.8/site-packages/xarray/core/dataset.py", line 1499, in __getitem__
return self._construct_dataarray(key)
File "/Users/atheisen/opt/anaconda3/lib/python3.8/site-packages/xarray/core/dataset.py", line 1397, in _construct_dataarray
_, name, variable = _get_virtual_variable(
File "/Users/atheisen/opt/anaconda3/lib/python3.8/site-packages/xarray/core/dataset.py", line 170, in _get_virtual_variable
ref_var = variables[ref_name]
KeyError: 'flag'
```
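Judging from the traceback, the linking step derives a data-variable name from the QC variable's name and then indexes the Dataset with it; with ICARTT's qc_flag naming, that lookup becomes ds['flag'], which does not exist. A rough reproduction under that assumption (the prefix-stripping below is a guess at the mechanism, not the actual ACT code):
```
# Rough reproduction of the KeyError -- the 'qc_' prefix stripping here is an
# assumption about the cleanup logic, not the actual ACT implementation.
import numpy as np
import xarray as xr

ds = xr.Dataset({'temp': ('time', np.arange(3.0)),
                 'qc_flag': ('time', np.zeros(3, dtype=np.int32))})

qc_name = 'qc_flag'
data_name = qc_name[len('qc_'):]  # -> 'flag', which is not in the Dataset
try:
    ds[data_name].attrs['ancillary_variables'] = qc_name
except KeyError as err:
    print('KeyError while linking variables:', err)
```
Renaming the ICARTT qc_flag column on read (to quality_flag, as the updated reader below does) avoids the missing-variable lookup.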
|
act/io/icartt.py
<|code_start|>"""
Modules for Reading/Writing the International Consortium for Atmospheric
Research on Transport and Transformation (ICARTT) file format standards V2.0
References:
ICARTT V2.0 Standards/Conventions:
- https://www.earthdata.nasa.gov/s3fs-public/imported/ESDS-RFC-029v2.pdf
"""
import numpy as np
import xarray as xr
try:
import icartt
_ICARTT_AVAILABLE = True
_format = icartt.Formats.FFI1001
except ImportError:
_ICARTT_AVAILABLE = False
_format = None
def read_icartt(filename, format=_format,
return_None=False, **kwargs):
"""
Returns `xarray.Dataset` with stored data and metadata from a user-defined
query of ICARTT from a single datastream. Has some procedures to ensure
    time is correctly formatted in the returned Dataset.
Parameters
----------
filename : str
Name of file to read.
format : str
ICARTT Format to Read: FFI1001 or FFI2110.
return_None : bool, optional
Catch IOError exception when file not found and return None.
Default is False.
**kwargs : keywords
keywords to pass on through to icartt.Dataset.
Returns
-------
act_obj : Object (or None)
ACT dataset (or None if no data file(s) found).
Examples
--------
This example will load the example sounding data used for unit testing.
.. code-block :: python
import act
the_ds = act.io.icartt.read_icartt(act.tests.sample_files.AAF_SAMPLE_FILE)
print(the_ds.attrs['_datastream'])
"""
if not _ICARTT_AVAILABLE:
raise ImportError(
"ICARTT is required to use to read ICARTT files but is " +
"not installed")
ds = None
# Create an exception tuple to use with try statements. Doing it this way
# so we can add the FileNotFoundError if requested. Can add more error
# handling in the future.
except_tuple = (ValueError,)
if return_None:
except_tuple = except_tuple + (FileNotFoundError, OSError)
try:
# Read data file with ICARTT dataset.
ict = icartt.Dataset(filename, format=format, **kwargs)
except except_tuple as exception:
# If requested return None for File not found error
if type(exception).__name__ == 'FileNotFoundError':
return None
# If requested return None for File not found error
if (type(exception).__name__ == 'OSError'
and exception.args[0] == 'no files to open'):
return None
# Define the Uncertainty for each variable. Note it may not be calculated.
# If not calculated, assign 'N/A' to the attribute
uncertainty = ict.normalComments[6].split(':')[1].split(',')
# Define the Upper and Lower Limit of Detection Flags
ulod_flag = ict.normalComments[7].split(':')[1]
ulod_value = ict.normalComments[8].split(':')[1].split(',')
llod_flag = ict.normalComments[9].split(':')[1]
llod_value = ict.normalComments[10].split(':')[1].split(',')
# Convert ICARTT Object to Xarray Dataset
ds_container = []
# Counter for uncertainty/LOD values
counter = 0
    # Loop over ICARTT variables, convert to Xarray DataArray, and append.
for key in ict.variables:
# Note time is the only independent variable within ICARTT
# Short name for time must be "Start_UTC" for ICARTT files.
if key != 'Start_UTC':
da = xr.DataArray(ict.data[key],
coords=dict(time=ict.times),
name=key, dims=['time'])
            # Assume if Uncertainty does not match the number of variables,
# values were not set within the file. Needs to be string!
if len(uncertainty) != len(ict.variables):
da.attrs['uncertainty'] = 'N/A'
else:
da.attrs['uncertainty'] = uncertainty[counter]
# Assume if ULOD does not match the number of variables within the
# the file, ULOD values were not set.
if len(ulod_value) != len(ict.variables):
da.attrs['ULOD_Value'] = 'N/A'
else:
da.attrs['ULOD_Value'] = ulod_value[counter]
# Assume if LLOD does not match the number of variables within the
# the file, LLOD values were not set.
if len(llod_value) != len(ict.variables):
da.attrs['LLOD_Value'] = 'N/A'
else:
da.attrs['LLOD_Value'] = llod_value[counter]
# Define the meta data:
da.attrs['units'] = ict.variables[key].units
da.attrs['mvc'] = ict.variables[key].miss
da.attrs['scale_factor'] = ict.variables[key].scale
da.attrs['ULOD_Flag'] = ulod_flag
da.attrs['LLOD_Flag'] = llod_flag
# Append to ds container
ds_container.append(da.to_dataset(name=key))
# up the counter
counter += 1
# Concatenate each of the Xarray DataArrays into a single Xarray DataSet
ds = xr.merge(ds_container)
# Assign ICARTT Meta data to Xarray DataSet
ds.attrs['PI'] = ict.PIName
ds.attrs['PI_Affiliation'] = ict.PIAffiliation
ds.attrs['Platform'] = ict.dataSourceDescription
ds.attrs['Mission'] = ict.missionName
ds.attrs['DateOfCollection'] = ict.dateOfCollection
ds.attrs['DateOfRevision'] = ict.dateOfRevision
ds.attrs['Data_Interval'] = ict.dataIntervalCode
ds.attrs['Independent_Var'] = str(ict.independentVariable)
ds.attrs['Dependent_Var_Num'] = len(ict.dependentVariables)
ds.attrs['PI_Contact'] = ict.normalComments[0].split('\n')[0].split(':')[-1]
ds.attrs['Platform'] = ict.normalComments[1].split(':')[-1]
ds.attrs['Location'] = ict.normalComments[2].split(':')[-1]
ds.attrs['Associated_Data'] = ict.normalComments[3].split(':')[-1]
ds.attrs['Instrument_Info'] = ict.normalComments[4].split(':')[-1]
ds.attrs['Data_Info'] = ict.normalComments[5][11:]
ds.attrs['DM_Contact'] = ict.normalComments[11].split(':')[-1]
ds.attrs['Project_Info'] = ict.normalComments[12].split(':')[-1]
ds.attrs['Stipulations'] = ict.normalComments[13].split(':')[-1]
ds.attrs['Comments'] = ict.normalComments[14].split(':')[-1]
ds.attrs['Revision'] = ict.normalComments[15].split(':')[-1]
ds.attrs['Revision_Comments'] = ict.normalComments[15 + 1].split(':')[-1]
    # Assign Additional ARM meta data to Xarray Dataset
ds.attrs['_datastream'] = filename.split('/')[-1].split('_')[0]
# Return Xarray Dataset
return ds
<|code_end|>
|
act/io/icartt.py
<|code_start|>"""
Modules for Reading/Writing the International Consortium for Atmospheric
Research on Transport and Transformation (ICARTT) file format standards V2.0
References:
ICARTT V2.0 Standards/Conventions:
- https://www.earthdata.nasa.gov/s3fs-public/imported/ESDS-RFC-029v2.pdf
"""
import numpy as np
import xarray as xr
try:
import icartt
_ICARTT_AVAILABLE = True
_format = icartt.Formats.FFI1001
except ImportError:
_ICARTT_AVAILABLE = False
_format = None
def read_icartt(filename, format=_format,
return_None=False, **kwargs):
"""
Returns `xarray.Dataset` with stored data and metadata from a user-defined
query of ICARTT from a single datastream. Has some procedures to ensure
    time is correctly formatted in the returned Dataset.
Parameters
----------
filename : str
Name of file to read.
format : str
ICARTT Format to Read: FFI1001 or FFI2110.
return_None : bool, optional
Catch IOError exception when file not found and return None.
Default is False.
**kwargs : keywords
keywords to pass on through to icartt.Dataset.
Returns
-------
act_obj : Object (or None)
ACT dataset (or None if no data file(s) found).
Examples
--------
This example will load the example sounding data used for unit testing.
.. code-block :: python
import act
the_ds = act.io.icartt.read_icartt(act.tests.sample_files.AAF_SAMPLE_FILE)
print(the_ds.attrs['_datastream'])
"""
if not _ICARTT_AVAILABLE:
raise ImportError(
"ICARTT is required to use to read ICARTT files but is " +
"not installed")
ds = None
# Create an exception tuple to use with try statements. Doing it this way
# so we can add the FileNotFoundError if requested. Can add more error
# handling in the future.
except_tuple = (ValueError,)
if return_None:
except_tuple = except_tuple + (FileNotFoundError, OSError)
try:
# Read data file with ICARTT dataset.
ict = icartt.Dataset(filename, format=format, **kwargs)
except except_tuple as exception:
# If requested return None for File not found error
if type(exception).__name__ == 'FileNotFoundError':
return None
# If requested return None for File not found error
if (type(exception).__name__ == 'OSError'
and exception.args[0] == 'no files to open'):
return None
# Define the Uncertainty for each variable. Note it may not be calculated.
# If not calculated, assign 'N/A' to the attribute
uncertainty = ict.normalComments[6].split(':')[1].split(',')
# Define the Upper and Lower Limit of Detection Flags
ulod_flag = ict.normalComments[7].split(':')[1]
ulod_value = ict.normalComments[8].split(':')[1].split(',')
llod_flag = ict.normalComments[9].split(':')[1]
llod_value = ict.normalComments[10].split(':')[1].split(',')
# Convert ICARTT Object to Xarray Dataset
ds_container = []
# Counter for uncertainty/LOD values
counter = 0
    # Loop over ICARTT variables, convert to Xarray DataArray, and append.
for key in ict.variables:
# Note time is the only independent variable within ICARTT
# Short name for time must be "Start_UTC" for ICARTT files.
if key != 'Start_UTC':
if key == 'qc_flag':
key2 = 'quality_flag'
else:
key2 = key
da = xr.DataArray(ict.data[key],
coords=dict(time=ict.times),
name=key2, dims=['time'])
            # Assume if Uncertainty does not match the number of variables,
# values were not set within the file. Needs to be string!
if len(uncertainty) != len(ict.variables):
da.attrs['uncertainty'] = 'N/A'
else:
da.attrs['uncertainty'] = uncertainty[counter]
# Assume if ULOD does not match the number of variables within the
# the file, ULOD values were not set.
if len(ulod_value) != len(ict.variables):
da.attrs['ULOD_Value'] = 'N/A'
else:
da.attrs['ULOD_Value'] = ulod_value[counter]
# Assume if LLOD does not match the number of variables within the
# the file, LLOD values were not set.
if len(llod_value) != len(ict.variables):
da.attrs['LLOD_Value'] = 'N/A'
else:
da.attrs['LLOD_Value'] = llod_value[counter]
# Define the meta data:
da.attrs['units'] = ict.variables[key].units
da.attrs['mvc'] = ict.variables[key].miss
da.attrs['scale_factor'] = ict.variables[key].scale
da.attrs['ULOD_Flag'] = ulod_flag
da.attrs['LLOD_Flag'] = llod_flag
# Append to ds container
ds_container.append(da.to_dataset(name=key2))
# up the counter
counter += 1
# Concatenate each of the Xarray DataArrays into a single Xarray DataSet
ds = xr.merge(ds_container)
# Assign ICARTT Meta data to Xarray DataSet
ds.attrs['PI'] = ict.PIName
ds.attrs['PI_Affiliation'] = ict.PIAffiliation
ds.attrs['Platform'] = ict.dataSourceDescription
ds.attrs['Mission'] = ict.missionName
ds.attrs['DateOfCollection'] = ict.dateOfCollection
ds.attrs['DateOfRevision'] = ict.dateOfRevision
ds.attrs['Data_Interval'] = ict.dataIntervalCode
ds.attrs['Independent_Var'] = str(ict.independentVariable)
ds.attrs['Dependent_Var_Num'] = len(ict.dependentVariables)
ds.attrs['PI_Contact'] = ict.normalComments[0].split('\n')[0].split(':')[-1]
ds.attrs['Platform'] = ict.normalComments[1].split(':')[-1]
ds.attrs['Location'] = ict.normalComments[2].split(':')[-1]
ds.attrs['Associated_Data'] = ict.normalComments[3].split(':')[-1]
ds.attrs['Instrument_Info'] = ict.normalComments[4].split(':')[-1]
ds.attrs['Data_Info'] = ict.normalComments[5][11:]
ds.attrs['DM_Contact'] = ict.normalComments[11].split(':')[-1]
ds.attrs['Project_Info'] = ict.normalComments[12].split(':')[-1]
ds.attrs['Stipulations'] = ict.normalComments[13].split(':')[-1]
ds.attrs['Comments'] = ict.normalComments[14].split(':')[-1]
ds.attrs['Revision'] = ict.normalComments[15].split(':')[-1]
ds.attrs['Revision_Comments'] = ict.normalComments[15 + 1].split(':')[-1]
    # Assign Additional ARM meta data to Xarray Dataset
ds.attrs['_datastream'] = filename.split('/')[-1].split('_')[0]
# Return Xarray Dataset
return ds
<|code_end|>
|
ARM Data API Returns Text when File not Available
I tried to download gucceilpblhtM1.a0 but it's not available through the webservice due to it being an a0-level file. Instead the API returned text that was included in the file. We should put in a check to ensure that these cases are caught and files are not produced.
```
This data file is not available on /data/archive. To download this file, please an order via Data Discovery. https://adc.arm.gov/discovery
```
|
act/discovery/get_armfiles.py
<|code_start|>"""
Script for downloading data from ARM's Live Data Webservice
"""
import argparse
import json
import os
import sys
from datetime import timedelta
try:
from urllib.request import urlopen
except ImportError:
from urllib import urlopen
from act.utils import date_parser
def download_data(username, token, datastream, startdate, enddate, time=None, output=None):
"""
This tool will help users utilize the ARM Live Data Webservice to download
ARM data.
Parameters
----------
username : str
The username to use for logging into the ADC archive.
token : str
The access token for accessing the ADC archive.
datastream : str
The name of the datastream to acquire.
startdate : str
The start date of the data to acquire. Formats accepted are
YYYY-MM-DD, DD.MM.YYYY, DD/MM/YYYY, YYYYMMDD, YYYY/MM/DD or
any of the previous formats with THH:MM:SS added onto the end
(ex. 2020-09-15T12:00:00).
enddate : str
The end date of the data to acquire. Formats accepted are
YYYY-MM-DD, DD.MM.YYYY, DD/MM/YYYY, YYYYMMDD or YYYY/MM/DD, or
any of the previous formats with THH:MM:SS added onto the end
(ex. 2020-09-15T13:00:00).
time: str or None
The specific time. Format is HHMMSS. Set to None to download all files
in the given date interval.
output : str
The output directory for the data. Set to None to make a folder in the
current working directory with the same name as *datastream* to place
the files in.
Returns
-------
files : list
Returns list of files retrieved
Notes
-----
This programmatic interface allows users to query and automate
machine-to-machine downloads of ARM data. This tool uses a REST URL and
specific parameters (saveData, query), user ID and access token, a
datastream name, a start date, and an end date, and data files matching
the criteria will be returned to the user and downloaded.
By using this web service, users can setup cron jobs and automatically
download data from /data/archive into their workspace. This will also
eliminate the manual step of following a link in an email to download data.
All other data files, which are not on the spinning
disk (on HPSS), will have to go through the regular ordering process.
More information about this REST API and tools can be found on `ARM Live
<https://adc.arm.gov/armlive/#scripts>`_.
To login/register for an access token click `here
<https://adc.arm.gov/armlive/livedata/home>`_.
Author: Michael Giansiracusa
Email: [email protected]
Web Tools Contact: Ranjeet Devarakonda [email protected]
Examples
--------
This code will download the netCDF files from the sgpmetE13.b1 datastream
and place them in a directory named sgpmetE13.b1. The data from 14 Jan to
20 Jan 2017 will be downloaded. Replace *userName* and *XXXXXXXXXXXXXXXX*
with your username and token for ARM Data Discovery. See the Notes for
information on how to obtain a username and token.
.. code-block:: python
act.discovery.download_data(
"userName", "XXXXXXXXXXXXXXXX", "sgpmetE13.b1", "2017-01-14", "2017-01-20"
)
"""
# default start and end are empty
start, end = '', ''
# start and end strings for query_url are constructed
# if the arguments were provided
if startdate:
start_datetime = date_parser(startdate, return_datetime=True)
start = start_datetime.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'
start = f'&start={start}'
if enddate:
end_datetime = date_parser(enddate, return_datetime=True)
        # If the start and end date are the same, add a day to the end date
if start_datetime == end_datetime:
end_datetime += timedelta(hours=23, minutes=59, seconds=59)
end = end_datetime.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'
end = f'&end={end}'
# build the url to query the web service using the arguments provided
query_url = (
'https://adc.arm.gov/armlive/livedata/query?' + 'user={0}&ds={1}{2}{3}&wt=json'
).format(':'.join([username, token]), datastream, start, end)
# get url response, read the body of the message,
# and decode from bytes type to utf-8 string
response_body = urlopen(query_url).read().decode('utf-8')
# if the response is an html doc, then there was an error with the user
if response_body[1:14] == '!DOCTYPE html':
raise ConnectionRefusedError('Error with user. Check username or token.')
# parse into json object
response_body_json = json.loads(response_body)
# construct output directory
if output:
# output files to directory specified
output_dir = os.path.join(output)
else:
# if no folder given, add datastream folder
# to current working dir to prevent file mix-up
output_dir = os.path.join(os.getcwd(), datastream)
# not testing, response is successful and files were returned
if response_body_json is None:
print('ARM Data Live Webservice does not appear to be functioning')
return []
num_files = len(response_body_json['files'])
file_names = []
if response_body_json['status'] == 'success' and num_files > 0:
for fname in response_body_json['files']:
if time is not None:
if time not in fname:
continue
print(f'[DOWNLOADING] {fname}')
# construct link to web service saveData function
save_data_url = (
'https://adc.arm.gov/armlive/livedata/' + 'saveData?user={0}&file={1}'
).format(':'.join([username, token]), fname)
output_file = os.path.join(output_dir, fname)
# make directory if it doesn't exist
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
# create file and write bytes to file
with open(output_file, 'wb') as open_bytes_file:
open_bytes_file.write(urlopen(save_data_url).read())
file_names.append(output_file)
else:
print(
'No files returned or url status error.\n' 'Check datastream name, start, and end date.'
)
return file_names
<|code_end|>
act/io/icartt.py
<|code_start|>"""
Modules for Reading/Writing the International Consortium for Atmospheric
Research on Transport and Transformation (ICARTT) file format standards V2.0
References:
ICARTT V2.0 Standards/Conventions:
- https://www.earthdata.nasa.gov/s3fs-public/imported/ESDS-RFC-029v2.pdf
"""
import numpy as np
import xarray as xr
try:
import icartt
_ICARTT_AVAILABLE = True
_format = icartt.Formats.FFI1001
except ImportError:
_ICARTT_AVAILABLE = False
_format = None
def read_icartt(filename, format=_format,
return_None=False, **kwargs):
"""
Returns `xarray.Dataset` with stored data and metadata from a user-defined
query of ICARTT from a single datastream. Has some procedures to ensure
    time is correctly formatted in the returned Dataset.
Parameters
----------
filename : str
Name of file to read.
format : str
ICARTT Format to Read: FFI1001 or FFI2110.
return_None : bool, optional
Catch IOError exception when file not found and return None.
Default is False.
**kwargs : keywords
keywords to pass on through to icartt.Dataset.
Returns
-------
act_obj : Object (or None)
ACT dataset (or None if no data file(s) found).
Examples
--------
This example will load the example sounding data used for unit testing.
.. code-block :: python
import act
the_ds = act.io.icartt.read_icartt(act.tests.sample_files.AAF_SAMPLE_FILE)
print(the_ds.attrs['_datastream'])
"""
if not _ICARTT_AVAILABLE:
raise ImportError(
"ICARTT is required to use to read ICARTT files but is " +
"not installed")
ds = None
# Create an exception tuple to use with try statements. Doing it this way
# so we can add the FileNotFoundError if requested. Can add more error
# handling in the future.
except_tuple = (ValueError,)
if return_None:
except_tuple = except_tuple + (FileNotFoundError, OSError)
try:
# Read data file with ICARTT dataset.
ict = icartt.Dataset(filename, format=format, **kwargs)
except except_tuple as exception:
# If requested return None for File not found error
if type(exception).__name__ == 'FileNotFoundError':
return None
# If requested return None for File not found error
if (type(exception).__name__ == 'OSError'
and exception.args[0] == 'no files to open'):
return None
# Define the Uncertainty for each variable. Note it may not be calculated.
# If not calculated, assign 'N/A' to the attribute
uncertainty = ict.normalComments[6].split(':')[1].split(',')
# Define the Upper and Lower Limit of Detection Flags
ulod_flag = ict.normalComments[7].split(':')[1]
ulod_value = ict.normalComments[8].split(':')[1].split(',')
llod_flag = ict.normalComments[9].split(':')[1]
llod_value = ict.normalComments[10].split(':')[1].split(',')
# Convert ICARTT Object to Xarray Dataset
ds_container = []
# Counter for uncertainty/LOD values
counter = 0
    # Loop over ICARTT variables, convert to Xarray DataArray, Append.
for key in ict.variables:
# Note time is the only independent variable within ICARTT
# Short name for time must be "Start_UTC" for ICARTT files.
if key != 'Start_UTC':
da = xr.DataArray(ict.data[key],
coords=dict(time=ict.times),
name=key, dims=['time'])
            # Assume if Uncertainty does not match the number of variables,
# values were not set within the file. Needs to be string!
if len(uncertainty) != len(ict.variables):
da.attrs['uncertainty'] = 'N/A'
else:
da.attrs['uncertainty'] = uncertainty[counter]
            # Assume if ULOD does not match the number of variables within
# the file, ULOD values were not set.
if len(ulod_value) != len(ict.variables):
da.attrs['ULOD_Value'] = 'N/A'
else:
da.attrs['ULOD_Value'] = ulod_value[counter]
            # Assume if LLOD does not match the number of variables within
# the file, LLOD values were not set.
if len(llod_value) != len(ict.variables):
da.attrs['LLOD_Value'] = 'N/A'
else:
da.attrs['LLOD_Value'] = llod_value[counter]
# Define the meta data:
da.attrs['units'] = ict.variables[key].units
da.attrs['mvc'] = ict.variables[key].miss
da.attrs['scale_factor'] = ict.variables[key].scale
da.attrs['ULOD_Flag'] = ulod_flag
da.attrs['LLOD_Flag'] = llod_flag
# Append to ds container
ds_container.append(da.to_dataset(name=key))
# up the counter
counter += 1
# Concatenate each of the Xarray DataArrays into a single Xarray DataSet
ds = xr.merge(ds_container)
# Assign ICARTT Meta data to Xarray DataSet
ds.attrs['PI'] = ict.PIName
ds.attrs['PI_Affiliation'] = ict.PIAffiliation
ds.attrs['Platform'] = ict.dataSourceDescription
ds.attrs['Mission'] = ict.missionName
ds.attrs['DateOfCollection'] = ict.dateOfCollection
ds.attrs['DateOfRevision'] = ict.dateOfRevision
ds.attrs['Data_Interval'] = ict.dataIntervalCode
ds.attrs['Independent_Var'] = str(ict.independentVariable)
ds.attrs['Dependent_Var_Num'] = len(ict.dependentVariables)
ds.attrs['PI_Contact'] = ict.normalComments[0].split('\n')[0].split(':')[-1]
ds.attrs['Platform'] = ict.normalComments[1].split(':')[-1]
ds.attrs['Location'] = ict.normalComments[2].split(':')[-1]
ds.attrs['Associated_Data'] = ict.normalComments[3].split(':')[-1]
ds.attrs['Instrument_Info'] = ict.normalComments[4].split(':')[-1]
ds.attrs['Data_Info'] = ict.normalComments[5][11:]
ds.attrs['DM_Contact'] = ict.normalComments[11].split(':')[-1]
ds.attrs['Project_Info'] = ict.normalComments[12].split(':')[-1]
ds.attrs['Stipulations'] = ict.normalComments[13].split(':')[-1]
ds.attrs['Comments'] = ict.normalComments[14].split(':')[-1]
ds.attrs['Revision'] = ict.normalComments[15].split(':')[-1]
ds.attrs['Revision_Comments'] = ict.normalComments[15 + 1].split(':')[-1]
    # Assign Additional ARM meta data to Xarray Dataset
ds.attrs['_datastream'] = filename.split('/')[-1].split('_')[0]
# Return Xarray Dataset
return ds
<|code_end|>
|
act/discovery/get_armfiles.py
<|code_start|>"""
Script for downloading data from ARM's Live Data Webservice
"""
import argparse
import json
import os
import sys
from datetime import timedelta
try:
from urllib.request import urlopen
except ImportError:
from urllib import urlopen
from act.utils import date_parser
def download_data(username, token, datastream, startdate, enddate, time=None, output=None):
"""
This tool will help users utilize the ARM Live Data Webservice to download
ARM data.
Parameters
----------
username : str
The username to use for logging into the ADC archive.
token : str
The access token for accessing the ADC archive.
datastream : str
The name of the datastream to acquire.
startdate : str
The start date of the data to acquire. Formats accepted are
YYYY-MM-DD, DD.MM.YYYY, DD/MM/YYYY, YYYYMMDD, YYYY/MM/DD or
any of the previous formats with THH:MM:SS added onto the end
(ex. 2020-09-15T12:00:00).
enddate : str
The end date of the data to acquire. Formats accepted are
YYYY-MM-DD, DD.MM.YYYY, DD/MM/YYYY, YYYYMMDD or YYYY/MM/DD, or
any of the previous formats with THH:MM:SS added onto the end
(ex. 2020-09-15T13:00:00).
time: str or None
The specific time. Format is HHMMSS. Set to None to download all files
in the given date interval.
output : str
The output directory for the data. Set to None to make a folder in the
current working directory with the same name as *datastream* to place
the files in.
Returns
-------
files : list
Returns list of files retrieved
Notes
-----
This programmatic interface allows users to query and automate
machine-to-machine downloads of ARM data. This tool uses a REST URL and
specific parameters (saveData, query), user ID and access token, a
datastream name, a start date, and an end date, and data files matching
the criteria will be returned to the user and downloaded.
By using this web service, users can setup cron jobs and automatically
download data from /data/archive into their workspace. This will also
eliminate the manual step of following a link in an email to download data.
All other data files, which are not on the spinning
disk (on HPSS), will have to go through the regular ordering process.
More information about this REST API and tools can be found on `ARM Live
<https://adc.arm.gov/armlive/#scripts>`_.
To login/register for an access token click `here
<https://adc.arm.gov/armlive/livedata/home>`_.
Author: Michael Giansiracusa
Email: [email protected]
Web Tools Contact: Ranjeet Devarakonda [email protected]
Examples
--------
This code will download the netCDF files from the sgpmetE13.b1 datastream
and place them in a directory named sgpmetE13.b1. The data from 14 Jan to
20 Jan 2017 will be downloaded. Replace *userName* and *XXXXXXXXXXXXXXXX*
with your username and token for ARM Data Discovery. See the Notes for
information on how to obtain a username and token.
.. code-block:: python
act.discovery.download_data(
"userName", "XXXXXXXXXXXXXXXX", "sgpmetE13.b1", "2017-01-14", "2017-01-20"
)
"""
# default start and end are empty
start, end = '', ''
# start and end strings for query_url are constructed
# if the arguments were provided
if startdate:
start_datetime = date_parser(startdate, return_datetime=True)
start = start_datetime.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'
start = f'&start={start}'
if enddate:
end_datetime = date_parser(enddate, return_datetime=True)
        # If the start and end date are the same, add a day to the end date
if start_datetime == end_datetime:
end_datetime += timedelta(hours=23, minutes=59, seconds=59)
end = end_datetime.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'
end = f'&end={end}'
# build the url to query the web service using the arguments provided
query_url = (
'https://adc.arm.gov/armlive/livedata/query?' + 'user={0}&ds={1}{2}{3}&wt=json'
).format(':'.join([username, token]), datastream, start, end)
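    # For illustration only, the assembled query URL has the general form
    # (placeholders, not real values):
    #   https://adc.arm.gov/armlive/livedata/query?user=<user>:<token>&ds=<datastream>&start=...Z&end=...Z&wt=json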
# get url response, read the body of the message,
# and decode from bytes type to utf-8 string
response_body = urlopen(query_url).read().decode('utf-8')
# if the response is an html doc, then there was an error with the user
if response_body[1:14] == '!DOCTYPE html':
raise ConnectionRefusedError('Error with user. Check username or token.')
# parse into json object
response_body_json = json.loads(response_body)
# construct output directory
if output:
# output files to directory specified
output_dir = os.path.join(output)
else:
# if no folder given, add datastream folder
# to current working dir to prevent file mix-up
output_dir = os.path.join(os.getcwd(), datastream)
    # make sure the web service returned a usable response before continuing
if response_body_json is None:
print('ARM Data Live Webservice does not appear to be functioning')
return []
num_files = len(response_body_json['files'])
file_names = []
if response_body_json['status'] == 'success' and num_files > 0:
for fname in response_body_json['files']:
if time is not None:
if time not in fname:
continue
# construct link to web service saveData function
save_data_url = (
'https://adc.arm.gov/armlive/livedata/' + 'saveData?user={0}&file={1}'
).format(':'.join([username, token]), fname)
output_file = os.path.join(output_dir, fname)
# make directory if it doesn't exist
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
# create file and write bytes to file
with open(output_file, 'wb') as open_bytes_file:
data = urlopen(save_data_url).read()
if 'This data file is not available' in str(data):
print(fname + ' is not available for download')
continue
else:
print(f'[DOWNLOADING] {fname}')
open_bytes_file.write(data)
file_names.append(output_file)
else:
print(
'No files returned or url status error.\n' 'Check datastream name, start, and end date.'
)
return file_names
<|code_end|>
act/io/icartt.py
<|code_start|>"""
Modules for Reading/Writing the International Consortium for Atmospheric
Research on Transport and Transformation (ICARTT) file format standards V2.0
References:
ICARTT V2.0 Standards/Conventions:
- https://www.earthdata.nasa.gov/s3fs-public/imported/ESDS-RFC-029v2.pdf
"""
import numpy as np
import xarray as xr
try:
import icartt
_ICARTT_AVAILABLE = True
_format = icartt.Formats.FFI1001
except ImportError:
_ICARTT_AVAILABLE = False
_format = None
def read_icartt(filename, format=_format,
return_None=False, **kwargs):
"""
Returns `xarray.Dataset` with stored data and metadata from a user-defined
query of ICARTT from a single datastream. Has some procedures to ensure
    time is correctly formatted in returned Dataset.
Parameters
----------
filename : str
Name of file to read.
format : str
ICARTT Format to Read: FFI1001 or FFI2110.
return_None : bool, optional
Catch IOError exception when file not found and return None.
Default is False.
**kwargs : keywords
keywords to pass on through to icartt.Dataset.
Returns
-------
act_obj : Object (or None)
ACT dataset (or None if no data file(s) found).
Examples
--------
This example will load the example sounding data used for unit testing.
.. code-block :: python
import act
the_ds = act.io.icartt.read_icartt(act.tests.sample_files.AAF_SAMPLE_FILE)
print(the_ds.attrs['_datastream'])
"""
if not _ICARTT_AVAILABLE:
raise ImportError(
"ICARTT is required to use to read ICARTT files but is " +
"not installed")
ds = None
# Create an exception tuple to use with try statements. Doing it this way
# so we can add the FileNotFoundError if requested. Can add more error
# handling in the future.
except_tuple = (ValueError,)
if return_None:
except_tuple = except_tuple + (FileNotFoundError, OSError)
try:
# Read data file with ICARTT dataset.
ict = icartt.Dataset(filename, format=format, **kwargs)
except except_tuple as exception:
# If requested return None for File not found error
if type(exception).__name__ == 'FileNotFoundError':
return None
# If requested return None for File not found error
if (type(exception).__name__ == 'OSError'
and exception.args[0] == 'no files to open'):
return None
# Define the Uncertainty for each variable. Note it may not be calculated.
# If not calculated, assign 'N/A' to the attribute
uncertainty = ict.normalComments[6].split(':')[1].split(',')
# Define the Upper and Lower Limit of Detection Flags
ulod_flag = ict.normalComments[7].split(':')[1]
ulod_value = ict.normalComments[8].split(':')[1].split(',')
llod_flag = ict.normalComments[9].split(':')[1]
llod_value = ict.normalComments[10].split(':')[1].split(',')
# Convert ICARTT Object to Xarray Dataset
ds_container = []
# Counter for uncertainty/LOD values
counter = 0
    # Loop over ICARTT variables, convert to Xarray DataArray, Append.
for key in ict.variables:
# Note time is the only independent variable within ICARTT
# Short name for time must be "Start_UTC" for ICARTT files.
if key != 'Start_UTC':
if key == 'qc_flag':
key2 = 'quality_flag'
else:
key2 = key
da = xr.DataArray(ict.data[key],
coords=dict(time=ict.times),
name=key2, dims=['time'])
            # Assume if Uncertainty does not match the number of variables,
# values were not set within the file. Needs to be string!
if len(uncertainty) != len(ict.variables):
da.attrs['uncertainty'] = 'N/A'
else:
da.attrs['uncertainty'] = uncertainty[counter]
            # Assume if ULOD does not match the number of variables within
# the file, ULOD values were not set.
if len(ulod_value) != len(ict.variables):
da.attrs['ULOD_Value'] = 'N/A'
else:
da.attrs['ULOD_Value'] = ulod_value[counter]
            # Assume if LLOD does not match the number of variables within
# the file, LLOD values were not set.
if len(llod_value) != len(ict.variables):
da.attrs['LLOD_Value'] = 'N/A'
else:
da.attrs['LLOD_Value'] = llod_value[counter]
# Define the meta data:
da.attrs['units'] = ict.variables[key].units
da.attrs['mvc'] = ict.variables[key].miss
da.attrs['scale_factor'] = ict.variables[key].scale
da.attrs['ULOD_Flag'] = ulod_flag
da.attrs['LLOD_Flag'] = llod_flag
# Append to ds container
ds_container.append(da.to_dataset(name=key2))
# up the counter
counter += 1
# Concatenate each of the Xarray DataArrays into a single Xarray DataSet
ds = xr.merge(ds_container)
# Assign ICARTT Meta data to Xarray DataSet
ds.attrs['PI'] = ict.PIName
ds.attrs['PI_Affiliation'] = ict.PIAffiliation
ds.attrs['Platform'] = ict.dataSourceDescription
ds.attrs['Mission'] = ict.missionName
ds.attrs['DateOfCollection'] = ict.dateOfCollection
ds.attrs['DateOfRevision'] = ict.dateOfRevision
ds.attrs['Data_Interval'] = ict.dataIntervalCode
ds.attrs['Independent_Var'] = str(ict.independentVariable)
ds.attrs['Dependent_Var_Num'] = len(ict.dependentVariables)
ds.attrs['PI_Contact'] = ict.normalComments[0].split('\n')[0].split(':')[-1]
ds.attrs['Platform'] = ict.normalComments[1].split(':')[-1]
ds.attrs['Location'] = ict.normalComments[2].split(':')[-1]
ds.attrs['Associated_Data'] = ict.normalComments[3].split(':')[-1]
ds.attrs['Instrument_Info'] = ict.normalComments[4].split(':')[-1]
ds.attrs['Data_Info'] = ict.normalComments[5][11:]
ds.attrs['DM_Contact'] = ict.normalComments[11].split(':')[-1]
ds.attrs['Project_Info'] = ict.normalComments[12].split(':')[-1]
ds.attrs['Stipulations'] = ict.normalComments[13].split(':')[-1]
ds.attrs['Comments'] = ict.normalComments[14].split(':')[-1]
ds.attrs['Revision'] = ict.normalComments[15].split(':')[-1]
ds.attrs['Revision_Comments'] = ict.normalComments[15 + 1].split(':')[-1]
    # Assign Additional ARM meta data to Xarray Dataset
ds.attrs['_datastream'] = filename.split('/')[-1].split('_')[0]
# Return Xarray Dataset
return ds
<|code_end|>
|
Xarray not able to read old MMCR data
Some older data might not be compatible with xarray. sgpmmcrmomC1.b1.20041107 data yielded this error:
xarray.core.variable.MissingDimensionsError: 'heights' has more than 1-dimension and the same name as one of its dimensions ('mode', 'heights'). xarray disallows such variables because they conflict with the coordinates used to label dimensions.
Is there a way around this?
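One possible workaround (a sketch only, not necessarily the fix adopted here) is to drop the offending 2D 'heights' variable when opening the file, since the name collision between the variable and its dimension is what triggers the error:

import xarray as xr

# Hypothetical file name, for illustration only.
filename = 'sgpmmcrmomC1.b1.20041107.000000.cdf'

# Skipping the 2D 'heights' variable that shares a name with one of its
# dimensions avoids xarray's MissingDimensionsError on open.
ds = xr.open_dataset(filename, drop_variables=['heights'])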
|
act/io/__init__.py
<|code_start|>"""
This module contains procedures for reading and writing various ARM datasets.
"""
import lazy_loader as lazy
__getattr__, __dir__, __all__ = lazy.attach(
__name__,
submodules=['armfiles', 'csvfiles', 'icartt', 'mpl', 'noaagml', 'noaapsl', 'pysp2'],
submod_attrs={
'armfiles': [
'WriteDataset',
'check_arm_standards',
'create_obj_from_arm_dod',
'read_netcdf',
],
'csvfiles': ['read_csv'],
'icartt': ['read_icartt'],
'mpl': ['proc_sigma_mplv5_read', 'read_sigma_mplv5'],
'noaagml': [
'read_gml',
'read_gml_co2',
'read_gml_halo',
'read_gml_met',
'read_gml_ozone',
'read_gml_radiation',
],
'noaapsl': [
'read_psl_wind_profiler',
'read_psl_wind_profiler_temperature',
'read_psl_parsivel',
'read_psl_radar_fmcw_moment',
],
'pysp2': ['read_hk_file', 'read_sp2', 'read_sp2_dat'],
},
)
<|code_end|>
act/io/armfiles.py
<|code_start|>"""
This module contains I/O operations for loading files that were created for the
Atmospheric Radiation Measurement program supported by the Department of Energy
Office of Science.
"""
import copy
import glob
import json
import re
import urllib
import warnings
from pathlib import Path, PosixPath
from netCDF4 import Dataset
import numpy as np
import xarray as xr
import act.utils as utils
from act.config import DEFAULT_DATASTREAM_NAME
def read_netcdf(
filenames,
concat_dim=None,
return_None=False,
combine='by_coords',
use_cftime=True,
cftime_to_datetime64=True,
combine_attrs='override',
cleanup_qc=False,
keep_variables=None,
**kwargs,
):
"""
Returns `xarray.Dataset` with stored data and metadata from a user-defined
query of ARM-standard netCDF files from a single datastream. Has some procedures
    to ensure time is correctly formatted in returned Dataset.
Parameters
----------
filenames : str, pathlib.PosixPath or list of str
Name of file(s) to read.
concat_dim : str
Dimension to concatenate files along. Default value is 'None'.
return_None : bool, optional
Catch IOError exception when file not found and return None.
Default is False.
combine : str
String used by xarray.open_mfdataset() to determine how to combine
data files into one Dataset. See Xarray documentation for options.
use_cftime : boolean
Option to use cftime library to parse the time units string and correctly
establish the time values with a units string containing timezone offset.
This will return the time in cftime format. See cftime_to_datetime64 if
        you don't want to convert the times in the xarray dataset from cftime to numpy datetime64.
cftime_to_datetime64 : boolean
If time is stored as cftime in xarray dataset convert to numpy datetime64. If time
        precision required is sub-millisecond set decode_times=False but leave
cftime_to_datetime64=True. This will force it to use base_time and time_offset
to set time.
combine_attrs : str
String indicating how to combine attrs of the objects being merged
cleanup_qc : boolean
Call clean.cleanup() method to convert to standardized ancillary quality control
variables. This will not allow any keyword options, so if non-default behavior is
desired will need to call clean.cleanup() method on the object after reading the data.
keep_variables : str or list of str
Variable names to read from data file. Works by creating a list of variable names
to exclude from reading and passing into open_mfdataset() via drop_variables keyword.
Still allows use of drop_variables keyword for variables not listed in first file to
read.
**kwargs : keywords
Keywords to pass through to xarray.open_mfdataset().
Returns
-------
act_obj : Object (or None)
ACT dataset (or None if no data file(s) found).
Examples
--------
This example will load the example sounding data used for unit testing.
.. code-block :: python
import act
the_ds, the_flag = act.io.armfiles.read_netcdf(act.tests.sample_files.EXAMPLE_SONDE_WILDCARD)
print(the_ds.attrs._datastream)
"""
ds = None
file_dates = []
file_times = []
    # Add function keywords to kwargs dictionary for passing into open_mfdataset.
kwargs['combine'] = combine
kwargs['concat_dim'] = concat_dim
kwargs['use_cftime'] = use_cftime
if len(filenames) > 1 and not isinstance(filenames, str):
kwargs['combine_attrs'] = combine_attrs
# Check if keep_variables is set. If so determine correct drop_variables
if keep_variables is not None:
drop_variables = None
if 'drop_variables' in kwargs.keys():
drop_variables = kwargs['drop_variables']
kwargs['drop_variables'] = keep_variables_to_drop_variables(
filenames, keep_variables, drop_variables=drop_variables)
# Create an exception tuple to use with try statements. Doing it this way
# so we can add the FileNotFoundError if requested. Can add more error
# handling in the future.
except_tuple = (ValueError,)
if return_None:
except_tuple = except_tuple + (FileNotFoundError, OSError)
try:
# Read data file with Xarray function
ds = xr.open_mfdataset(filenames, **kwargs)
except except_tuple as exception:
# If requested return None for File not found error
if type(exception).__name__ == 'FileNotFoundError':
return None
# If requested return None for File not found error
if type(exception).__name__ == 'OSError' and exception.args[0] == 'no files to open':
return None
# Look at error message and see if could be nested error message. If so
# update combine keyword and try again. This should allow reading files
        # without a time variable but with base_time and time_offset variables.
if (
kwargs['combine'] != 'nested'
and type(exception).__name__ == 'ValueError'
and exception.args[0] == 'Could not find any dimension coordinates '
'to use to order the datasets for concatenation'
):
kwargs['combine'] = 'nested'
ds = xr.open_mfdataset(filenames, **kwargs)
else:
            # When all else fails raise the original exception
raise exception
# Xarray has issues reading a CF formatted time units string if it contains
    # timezone offset without a [+|-] preceding timezone offset.
# https://github.com/pydata/xarray/issues/3644
# To ensure the times are read in correctly need to set use_cftime=True.
# This will read in time as cftime object. But Xarray uses numpy datetime64
# natively. This will convert the cftime time values to numpy datetime64. cftime
# does not preserve the time past ms precision. We will use ms precision for
# the conversion.
desired_time_precision = 'datetime64[ms]'
for var_name in ['time', 'time_offset']:
try:
if (
cftime_to_datetime64
and 'time' in ds.dims
and type(ds[var_name].values[0]).__module__.startswith('cftime.')
):
# If we just convert time to datetime64 the group, sel, and other Xarray
# methods will not work correctly because time is not indexed. Need to
# use the formation of a Dataset to correctly set the time indexing.
temp_ds = xr.Dataset(
{
var_name: (
ds[var_name].dims,
ds[var_name].values.astype(desired_time_precision),
ds[var_name].attrs,
)
}
)
ds[var_name] = temp_ds[var_name]
temp_ds.close()
# If time_offset is in file try to convert base_time as well
if var_name == 'time_offset':
ds['base_time'].values = ds['base_time'].values.astype(desired_time_precision)
except KeyError:
pass
# Check if "time" variable is not in the netCDF file. If so try to use
# base_time and time_offset to make time variable. Basically a fix for incorrectly
# formatted files. May require using decode_times=False to initially read the data.
if (
cftime_to_datetime64
and 'time' in ds.dims
and 'time' not in ds.coords
and 'time_offset' in ds.data_vars
):
try:
ds = ds.rename({'time_offset': 'time'})
ds = ds.set_coords('time')
del ds['time'].attrs['units']
except (KeyError, ValueError):
pass
# If "time" is not a datetime64 use base_time to calcualte corect values to datetime64
# by adding base_time to time_offset. time_offset was renamed to time above.
if (
cftime_to_datetime64
and 'time' in ds.dims
and 'base_time' in ds.data_vars
and not np.issubdtype(ds['time'].values.dtype, np.datetime64)
and not type(ds['time'].values[0]).__module__.startswith('cftime.')
):
# Use microsecond precision to create time since epoch. Then convert to datetime64
if ds['base_time'].values == ds['time_offset'].values[0]:
time = ds['time_offset'].values
else:
time = (ds['base_time'].values + ds['time_offset'].values * 1000000.0).astype(
'datetime64[us]'
)
# Need to use a new Dataset creation to correctly index time for use with
# .group and .resample methods in Xarray Datasets.
temp_ds = xr.Dataset({'time': (ds['time'].dims, time, ds['time'].attrs)})
ds['time'] = temp_ds['time']
temp_ds.close()
for att_name in ['units', 'ancillary_variables']:
try:
del ds['time'].attrs[att_name]
except KeyError:
pass
# Adding support for wildcards
if isinstance(filenames, str):
filenames = glob.glob(filenames)
elif isinstance(filenames, PosixPath):
filenames = [filenames]
# Get file dates and times that were read in to the object
filenames.sort()
for f in filenames:
f = Path(f).name
pts = re.match(r'(^[a-zA-Z0-9]+)\.([0-9a-z]{2})\.([\d]{8})\.([\d]{6})\.([a-z]{2,3}$)', f)
# If Not ARM format, read in first time for info
if pts is not None:
pts = pts.groups()
file_dates.append(pts[2])
file_times.append(pts[3])
else:
if ds['time'].size > 1:
dummy = ds['time'].values[0]
else:
dummy = ds['time'].values
file_dates.append(utils.numpy_to_arm_date(dummy))
file_times.append(utils.numpy_to_arm_date(dummy, returnTime=True))
# Add attributes
ds.attrs['_file_dates'] = file_dates
ds.attrs['_file_times'] = file_times
is_arm_file_flag = check_arm_standards(ds)
    # Ensure that we have _datastream set whether or not there's
# a datastream attribute already.
if is_arm_file_flag == 0:
ds.attrs['_datastream'] = DEFAULT_DATASTREAM_NAME
else:
ds.attrs['_datastream'] = ds.attrs['datastream']
ds.attrs['_arm_standards_flag'] = is_arm_file_flag
if cleanup_qc:
ds.clean.cleanup()
return ds
def keep_variables_to_drop_variables(
filenames,
keep_variables,
drop_variables=None):
"""
Returns a list of variable names to exclude from reading by passing into
`Xarray.open_dataset` drop_variables keyword. This can greatly help reduce
loading time and disk space use of the Dataset.
When passed a netCDF file name, will open the file using the netCDF4 library to get
    list of variable names. There is less overhead reading the variable names using
netCDF4 library than Xarray. If more than one filename is provided or string is
used for shell syntax globbing, will use the first file in the list.
Parameters
----------
filenames : str, pathlib.PosixPath or list of str
Name of file(s) to read.
keep_variables : str or list of str
        Variable names desired to keep. Do not need to list associated dimension
names. These will be automatically kept as well.
drop_variables : str or list of str
Variable names to explicitly add to returned list. May be helpful if a variable
exists in a file that is not in the first file in the list.
Returns
-------
act_obj : list of str
Variable names to exclude from returned Dataset by using drop_variables keyword
when calling Xarray.open_dataset().
Examples
--------
.. code-block :: python
import act
filename = '/data/datastream/hou/houkasacrcfrM1.a1/houkasacrcfrM1.a1.20220404.*.nc'
drop_vars = act.io.armfiles.keep_variables_to_drop_variables(
filename, ['lat','lon','alt','crosspolar_differential_phase'],
drop_variables='variable_name_that_only_exists_in_last_file_of_the_day')
"""
read_variables = []
return_variables = []
if isinstance(keep_variables, str):
keep_variables = [keep_variables]
if isinstance(drop_variables, str):
drop_variables = [drop_variables]
# If filenames is a list subset to first file name.
if isinstance(filenames, (list, tuple)):
filename = filenames[0]
# If filenames is a string, check if it needs to be expanded in shell
# first. Then use first returned file name. Else use the string filename.
elif isinstance(filenames, str):
filename = glob.glob(filenames)
if len(filename) == 0:
return return_variables
else:
filename.sort()
filename = filename[0]
# Use netCDF4 library to extract the variable and dimension names.
rootgrp = Dataset(filename, 'r')
read_variables = list(rootgrp.variables)
dimensions = list(rootgrp.dimensions)
    # Loop over the variables to exclude needed coordinate dimension names.
dims_to_keep = []
for var_name in keep_variables:
try:
dims_to_keep.extend(list(rootgrp[var_name].dimensions))
except IndexError:
pass
rootgrp.close()
    # Remove names not matching keep_variables, excluding the associated coordinate dimensions
return_variables = set(read_variables) - set(keep_variables) - set(dims_to_keep)
# Add drop_variables to list
if drop_variables is not None:
return_variables = set(return_variables) | set(drop_variables)
return list(return_variables)
def check_arm_standards(ds):
"""
Checks to see if an xarray dataset conforms to ARM standards.
Parameters
----------
ds : xarray dataset
The dataset to check.
Returns
-------
flag : int
The flag corresponding to whether or not the file conforms
to ARM standards. Bit packed, so 0 for no, 1 for yes
"""
the_flag = 1 << 0
if 'datastream' not in ds.attrs.keys():
the_flag = 0
# Check if the historical global attribute name is
# used instead of updated name of 'datastream'. If so
# correct the global attributes and flip flag.
if 'zeb_platform' in ds.attrs.keys():
ds.attrs['datastream'] = copy.copy(ds.attrs['zeb_platform'])
del ds.attrs['zeb_platform']
the_flag = 1 << 0
return the_flag
def create_obj_from_arm_dod(proc, set_dims, version='', fill_value=-9999.0, scalar_fill_dim=None):
"""
Queries the ARM DOD api and builds an object based on the ARM DOD and
the dimension sizes that are passed in.
Parameters
----------
proc : string
Process to create the object off of. This is normally in the
format of inst.level. i.e. vdis.b1 or kazrge.a1
set_dims : dict
Dictionary of dims from the DOD and the corresponding sizes.
Time is required. Code will try and pull from DOD, unless set
through this variable
Note: names need to match exactly what is in the dod
i.e. {'drop_diameter': 50, 'time': 1440}
version : string
Version number of the ingest to use. If not set, defaults to
latest version
fill_value : float
Fill value for non-dimension variables. Dimensions cannot have
duplicate values and are incrementally set (0, 1, 2)
scalar_fill_dim : str
Depending on how the object is set up, sometimes the scalar values
are dimensioned to the main dimension. i.e. a lat/lon is set to have
a dimension of time. This is a way to set it up similarly.
Returns
-------
obj : xarray Dataset
ACT object populated with all variables and attributes.
Examples
--------
.. code-block :: python
dims = {'time': 1440, 'drop_diameter': 50}
obj = act.io.armfiles.create_obj_from_arm_dod(
'vdis.b1', dims, version='1.2', scalar_fill_dim='time')
"""
# Set base url to get DOD information
base_url = 'https://pcm.arm.gov/pcm/api/dods/'
# Get data from DOD api
with urllib.request.urlopen(base_url + proc) as url:
data = json.loads(url.read().decode())
    # Check version numbers and alert if requested version is not available
keys = list(data['versions'].keys())
if version not in keys:
warnings.warn(
' '.join(
['Version:', version, 'not available or not specified. Using Version:', keys[-1]]
),
UserWarning,
)
version = keys[-1]
# Create empty xarray dataset
obj = xr.Dataset()
# Get the global attributes and add to dataset
atts = {}
for a in data['versions'][version]['atts']:
if a['name'] == 'string':
continue
if a['value'] is None:
a['value'] = ''
atts[a['name']] = a['value']
obj.attrs = atts
# Get variable information and create dataarrays that are
# then added to the dataset
# If not passed in through set_dims, will look to the DOD
# if not set in the DOD, then will raise error
variables = data['versions'][version]['vars']
dod_dims = data['versions'][version]['dims']
for d in dod_dims:
if d['name'] not in list(set_dims.keys()):
if d['length'] > 0:
set_dims[d['name']] = d['length']
else:
raise ValueError(
'Dimension length not set in DOD for '
+ d['name']
+ ', nor passed in through set_dim'
)
for v in variables:
dims = v['dims']
dim_shape = []
# Using provided dimension data, fill array accordingly for easy overwrite
if len(dims) == 0:
if scalar_fill_dim is None:
data_na = fill_value
else:
data_na = np.full(set_dims[scalar_fill_dim], fill_value)
v['dims'] = scalar_fill_dim
else:
for d in dims:
dim_shape.append(set_dims[d])
if len(dim_shape) == 1 and v['name'] == dims[0]:
data_na = np.arange(dim_shape[0])
else:
data_na = np.full(dim_shape, fill_value)
        # Get attribute information. Some adjustments are needed so the attributes print correctly to netCDF.
atts = {}
str_flag = False
for a in v['atts']:
if a['name'] == 'string':
str_flag = True
continue
if a['value'] is None:
continue
if str_flag and a['name'] == 'units':
continue
atts[a['name']] = a['value']
da = xr.DataArray(data=data_na, dims=v['dims'], name=v['name'], attrs=atts)
obj[v['name']] = da
return obj
@xr.register_dataset_accessor('write')
class WriteDataset:
"""
Class for cleaning up Dataset before writing to file.
"""
def __init__(self, xarray_obj):
self._obj = xarray_obj
def write_netcdf(
self,
cleanup_global_atts=True,
cleanup_qc_atts=True,
join_char='__',
make_copy=True,
cf_compliant=False,
delete_global_attrs=['qc_standards_version', 'qc_method', 'qc_comment'],
FillValue=-9999,
cf_convention='CF-1.8',
**kwargs,
):
"""
This is a wrapper around Dataset.to_netcdf to clean up the Dataset before
writing to disk. Some things are added to global attributes during ACT reading
        process, and QC variable attributes are modified during the QC cleanup process.
        This method modifies them before writing to disk to better
        match Climate & Forecast standards.
Parameters
----------
cleanup_global_atts : boolean
Option to cleanup global attributes by removing any global attribute
that starts with an underscore.
cleanup_qc_atts : boolean
Option to convert attributes that would be written as string array
            to be a single character string. CF 1.7 does not allow string array attributes.
            Will use a single space as a delimiter between values and join_char to replace
            white space between words (e.g. ['value ok', 'value bad'] becomes
            'value__ok value__bad' with the default join_char).
join_char : str
            The character string to use for replacing white spaces between words when converting
a list of strings to single character string attributes.
make_copy : boolean
Make a copy before modifying Dataset to write. For large Datasets this
may add processing time and memory. If modifying the Dataset is OK
try setting to False.
cf_compliant : boolean
Option to output file with additional attributes to make file Climate & Forecast
            compliant. May require running the .clean.cleanup() method on the object to fix other
            issues first. This does the best it can, but it may not be truly compliant. You
            should read the CF documents and try to make the file compliant before writing it.
delete_global_attrs : list
Optional global attributes to be deleted. Defaults to some standard
QC attributes that are not needed. Can add more or set to None to not
remove the attributes.
FillValue : int, float
The value to use as a _FillValue in output file. This is used to fix
issues with how Xarray handles missing_value upon reading. It's confusing
so not a perfect fix. Set to None to leave Xarray to do what it wants.
Set to a value to be the value used as _FillValue in the file and data
array. This should then remove missing_value attribute from the file as well.
cf_convention : str
The Climate and Forecast convention string to add to Conventions attribute.
**kwargs : keywords
Keywords to pass through to Dataset.to_netcdf()
Examples
--------
.. code-block :: python
ds_object.write.write_netcdf(path='output.nc')
"""
if make_copy:
write_obj = copy.deepcopy(self._obj)
else:
write_obj = self._obj
encoding = {}
if cleanup_global_atts:
for attr in list(write_obj.attrs):
if attr.startswith('_'):
del write_obj.attrs[attr]
if cleanup_qc_atts:
check_atts = ['flag_meanings', 'flag_assessments']
for var_name in list(write_obj.data_vars):
if 'standard_name' not in write_obj[var_name].attrs.keys():
continue
for attr_name in check_atts:
try:
att_values = write_obj[var_name].attrs[attr_name]
if isinstance(att_values, (list, tuple)):
att_values = [att_value.replace(' ', join_char) for att_value in att_values]
write_obj[var_name].attrs[attr_name] = ' '.join(att_values)
except KeyError:
pass
# Tell .to_netcdf() to not add a _FillValue attribute for
# quality control variables.
if FillValue is not None:
encoding[var_name] = {'_FillValue': None}
# Clean up _FillValue vs missing_value mess by creating an
# encoding dictionary with each variable's _FillValue set to
# requested fill value. May need to improve upon this for data type
# and other issues in the future.
if FillValue is not None:
skip_variables = ['base_time', 'time_offset', 'qc_time'] + list(encoding.keys())
for var_name in list(write_obj.data_vars):
if var_name not in skip_variables:
encoding[var_name] = {'_FillValue': FillValue}
if delete_global_attrs is not None:
for attr in delete_global_attrs:
try:
del write_obj.attrs[attr]
except KeyError:
pass
# If requested update global attributes and variables attributes for required
# CF attributes.
if cf_compliant:
# Get variable names and standard name for each variable
var_names = list(write_obj.keys())
standard_names = []
for var_name in var_names:
try:
standard_names.append(write_obj[var_name].attrs['standard_name'])
except KeyError:
standard_names.append(None)
            # Check if time variable has axis and standard_name attribute
coord_name = 'time'
try:
write_obj[coord_name].attrs['axis']
except KeyError:
try:
write_obj[coord_name].attrs['axis'] = 'T'
except KeyError:
pass
try:
write_obj[coord_name].attrs['standard_name']
except KeyError:
try:
write_obj[coord_name].attrs['standard_name'] = 'time'
except KeyError:
pass
            # Try to determine type of dataset by coordinate dimension named time
# and other factors
try:
write_obj.attrs['FeatureType']
except KeyError:
dim_names = list(write_obj.dims)
FeatureType = None
if dim_names == ['time']:
FeatureType = 'timeSeries'
elif len(dim_names) == 2 and 'time' in dim_names and 'bound' in dim_names:
FeatureType = 'timeSeries'
elif len(dim_names) >= 2 and 'time' in dim_names:
for var_name in var_names:
dims = list(write_obj[var_name].dims)
if len(dims) == 2 and 'time' in dims:
prof_dim = list(set(dims) - {'time'})[0]
if write_obj[prof_dim].values.size > 2:
FeatureType = 'timeSeriesProfile'
break
if FeatureType is not None:
write_obj.attrs['FeatureType'] = FeatureType
# Add axis and positive attributes to variables with standard_name
# equal to 'altitude'
alt_variables = [
var_names[ii] for ii, sn in enumerate(standard_names) if sn == 'altitude'
]
for var_name in alt_variables:
try:
write_obj[var_name].attrs['axis']
except KeyError:
write_obj[var_name].attrs['axis'] = 'Z'
try:
write_obj[var_name].attrs['positive']
except KeyError:
write_obj[var_name].attrs['positive'] = 'up'
# Check if the Conventions global attribute lists the CF convention
try:
Conventions = write_obj.attrs['Conventions']
Conventions = Conventions.split()
cf_listed = False
for ii in Conventions:
if ii.startswith('CF-'):
cf_listed = True
break
if not cf_listed:
Conventions.append(cf_convention)
write_obj.attrs['Conventions'] = ' '.join(Conventions)
except KeyError:
write_obj.attrs['Conventions'] = str(cf_convention)
# Reorder global attributes to ensure history is last
try:
global_attrs = write_obj.attrs
history = copy.copy(global_attrs['history'])
del global_attrs['history']
global_attrs['history'] = history
except KeyError:
pass
write_obj.to_netcdf(encoding=encoding, **kwargs)
<|code_end|>
act/plotting/timeseriesdisplay.py
<|code_start|>"""
Stores the class for TimeSeriesDisplay.
"""
import datetime as dt
import textwrap
import warnings
from copy import deepcopy
from re import search, search as re_search
import matplotlib as mpl
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from matplotlib import colors as mplcolors
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scipy.interpolate import NearestNDInterpolator
from ..qc.qcfilter import parse_bit
from ..utils import data_utils, datetime_utils as dt_utils
from ..utils.datetime_utils import determine_time_delta, reduce_time_ranges
from ..utils.geo_utils import get_sunrise_sunset_noon
from . import common
from .plot import Display
class TimeSeriesDisplay(Display):
"""
This subclass contains routines that are specific to plotting
time series plots from data. It is inherited from Display and therefore
contains all of Display's attributes and methods.
Examples
--------
To create a TimeSeriesDisplay with 3 rows, simply do:
.. code-block:: python
ds = act.read_netcdf(the_file)
disp = act.plotting.TimeSeriesDisplay(ds, subplot_shape=(3,), figsize=(15, 5))
The TimeSeriesDisplay constructor takes in the same keyword arguments as
plt.subplots. For more information on the plt.subplots keyword arguments,
see the `matplotlib documentation
<https://matplotlib.org/api/_as_gen/matplotlib.pyplot.subplots.html>`_.
If no subplot_shape is provided, then no figure or axis will be created
until add_subplots or plots is called.
"""
def __init__(self, obj, subplot_shape=(1,), ds_name=None, **kwargs):
super().__init__(obj, subplot_shape, ds_name, **kwargs)
def day_night_background(self, dsname=None, subplot_index=(0,)):
"""
Colorcodes the background according to sunrise/sunset.
Parameters
----------
dsname : None or str
If there is more than one datastream in the display object the
name of the datastream needs to be specified. If set to None and
there is only one datastream then ACT will use the sole datastream
in the object.
subplot_index : 1 or 2D tuple, list, or array
The index to the subplot to place the day and night background in.
"""
if dsname is None and len(self._obj.keys()) > 1:
raise ValueError(
'You must choose a datastream to derive the '
+ 'information needed for the day and night '
+ 'background when 2 or more datasets are in '
+ 'the display object.'
)
elif dsname is None:
dsname = list(self._obj.keys())[0]
# Get File Dates
try:
file_dates = self._obj[dsname].attrs['_file_dates']
except KeyError:
file_dates = []
if len(file_dates) == 0:
sdate = dt_utils.numpy_to_arm_date(self._obj[dsname].time.values[0])
edate = dt_utils.numpy_to_arm_date(self._obj[dsname].time.values[-1])
file_dates = [sdate, edate]
all_dates = dt_utils.dates_between(file_dates[0], file_dates[-1])
if self.axes is None:
raise RuntimeError('day_night_background requires the plot to ' 'be displayed.')
ax = self.axes[subplot_index]
# Find variable names for latitude and longitude
variables = list(self._obj[dsname].data_vars)
lat_name = [var for var in ['lat', 'latitude'] if var in variables]
lon_name = [var for var in ['lon', 'longitude'] if var in variables]
if len(lat_name) == 0:
lat_name = None
else:
lat_name = lat_name[0]
if len(lon_name) == 0:
lon_name = None
else:
lon_name = lon_name[0]
# Variable name does not match, look for standard_name declaration
if lat_name is None or lon_name is None:
for var in variables:
try:
if self._obj[dsname][var].attrs['standard_name'] == 'latitude':
lat_name = var
except KeyError:
pass
try:
if self._obj[dsname][var].attrs['standard_name'] == 'longitude':
lon_name = var
except KeyError:
pass
if lat_name is not None and lon_name is not None:
break
if lat_name is None or lon_name is None:
return
try:
if self._obj[dsname][lat_name].data.size > 1:
                # Look for non-NaN values to use for location. If not found use first value.
lat = self._obj[dsname][lat_name].values
index = np.where(np.isfinite(lat))[0]
if index.size == 0:
index = [0]
lat = float(lat[index[0]])
                # Look for non-NaN values to use for location. If not found use first value.
lon = self._obj[dsname][lon_name].values
index = np.where(np.isfinite(lon))[0]
if index.size == 0:
index = [0]
lon = float(lon[index[0]])
else:
lat = float(self._obj[dsname][lat_name].values)
lon = float(self._obj[dsname][lon_name].values)
except AttributeError:
return
if not np.isfinite(lat):
warnings.warn(f"Latitude value in dataset of '{lat}' is not finite. ", RuntimeWarning)
return
if not np.isfinite(lon):
warnings.warn(f"Longitude value in dataset of '{lon}' is not finite. ", RuntimeWarning)
return
lat_range = [-90, 90]
if not (lat_range[0] <= lat <= lat_range[1]):
warnings.warn(
f"Latitude value in dataset of '{lat}' not within acceptable "
f'range of {lat_range[0]} <= latitude <= {lat_range[1]}. ',
RuntimeWarning,
)
return
lon_range = [-180, 180]
if not (lon_range[0] <= lon <= lon_range[1]):
warnings.warn(
f"Longitude value in dataset of '{lon}' not within acceptable "
f'range of {lon_range[0]} <= longitude <= {lon_range[1]}. ',
RuntimeWarning,
)
return
# initialize the plot to a gray background for total darkness
rect = ax.patch
rect.set_facecolor('0.85')
# Get date ranges to plot
plot_dates = []
for f in all_dates:
for ii in [-1, 0, 1]:
plot_dates.append(f + dt.timedelta(days=ii))
# Get sunrise, sunset and noon times
sunrise, sunset, noon = get_sunrise_sunset_noon(lat, lon, plot_dates)
# Plot daylight
for ii in range(0, len(sunrise)):
ax.axvspan(sunrise[ii], sunset[ii], facecolor='#FFFFCC', zorder=0)
# Plot noon line
for ii in noon:
ax.axvline(x=ii, linestyle='--', color='y', zorder=1)
def set_xrng(self, xrng, subplot_index=(0,)):
"""
Sets the x range of the plot.
Parameters
----------
xrng : 2 number array
The x limits of the plot.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
"""
if self.axes is None:
raise RuntimeError('set_xrng requires the plot to be displayed.')
self.axes[subplot_index].set_xlim(xrng)
# Make sure that the xrng value is a numpy array not pandas
if isinstance(xrng[0], pd.Timestamp):
xrng = [x.to_numpy() for x in xrng if isinstance(x, pd.Timestamp)]
# Make sure that the xrng value is a numpy array not datetime.datetime
if isinstance(xrng[0], dt.datetime):
xrng = [np.datetime64(x) for x in xrng if isinstance(x, dt.datetime)]
if len(subplot_index) < 2:
self.xrng[subplot_index, 0] = xrng[0].astype('datetime64[D]').astype(float)
self.xrng[subplot_index, 1] = xrng[1].astype('datetime64[D]').astype(float)
else:
self.xrng[subplot_index][0] = xrng[0].astype('datetime64[D]').astype(float)
self.xrng[subplot_index][1] = xrng[1].astype('datetime64[D]').astype(float)
def set_yrng(self, yrng, subplot_index=(0,)):
"""
Sets the y range of the plot.
Parameters
----------
yrng : 2 number array
The y limits of the plot.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
"""
if self.axes is None:
raise RuntimeError('set_yrng requires the plot to be displayed.')
if not hasattr(self, 'yrng') and len(self.axes.shape) == 2:
self.yrng = np.zeros((self.axes.shape[0], self.axes.shape[1], 2))
elif not hasattr(self, 'yrng') and len(self.axes.shape) == 1:
self.yrng = np.zeros((self.axes.shape[0], 2))
if yrng[0] == yrng[1]:
yrng[1] = yrng[1] + 1
self.axes[subplot_index].set_ylim(yrng)
try:
self.yrng[subplot_index, :] = yrng
except IndexError:
self.yrng[subplot_index] = yrng
def plot(
self,
field,
dsname=None,
subplot_index=(0,),
cmap=None,
set_title=None,
add_nan=False,
day_night_background=False,
invert_y_axis=False,
abs_limits=(None, None),
time_rng=None,
y_rng=None,
use_var_for_y=None,
set_shading='auto',
assessment_overplot=False,
overplot_marker='.',
overplot_behind=False,
overplot_markersize=6,
assessment_overplot_category={
'Incorrect': ['Bad', 'Incorrect'],
'Suspect': ['Indeterminate', 'Suspect'],
},
assessment_overplot_category_color={'Incorrect': 'red', 'Suspect': 'orange'},
force_line_plot=False,
labels=False,
cbar_label=None,
cbar_h_adjust=None,
secondary_y=False,
y_axis_flag_meanings=False,
colorbar_labels=None,
**kwargs,
):
"""
Makes a timeseries plot. If subplots have not been added yet, an axis
will be created assuming that there is only going to be one plot.
Parameters
----------
field : str
The name of the field to plot.
dsname : None or str
If there is more than one datastream in the display object the
name of the datastream needs to be specified. If set to None and
there is only one datastream ACT will use the sole datastream
in the object.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
cmap : matplotlib colormap
The colormap to use.
set_title : str
The title for the plot.
add_nan : bool
Set to True to fill in data gaps with NaNs.
day_night_background : bool
            Set to True to fill in a color coded background
            according to the time of day.
abs_limits : tuple or list
Sets the bounds on plot limits even if data values exceed
those limits. Set to (ymin,ymax). Use None if only setting
minimum or maximum limit, i.e. (22., None).
time_rng : tuple or list
List or tuple with (min, max) values to set the x-axis range
limits.
y_rng : tuple or list
List or tuple with (min, max) values to set the y-axis range
use_var_for_y : str
Set this to the name of a data variable in the Dataset to use as
the y-axis variable instead of the default dimension. Useful for
instances where data has an index-based dimension instead of a
height-based dimension. If shapes of arrays do not match it will
automatically revert back to the original ydata.
set_shading : string
            Option to set the matplotlib.pcolormesh shading parameter.
            Defaults to 'auto'.
assessment_overplot : boolean
Option to overplot quality control colored symbols over plotted
data using flag_assessment categories.
overplot_marker : str
Marker to use for overplot symbol.
overplot_behind : bool
Place the overplot marker behind the data point.
overplot_markersize : float or int
Size of overplot marker. If overplot_behind or force_line_plot
are set the marker size will be double overplot_markersize so
the color is visible.
assessment_overplot_category : dict
Lookup to categorize assessments into groups. This allows using
multiple terms for the same quality control level of failure.
Also allows adding more to the defaults.
assessment_overplot_category_color : dict
Lookup to match overplot category color to assessment grouping.
force_line_plot : boolean
Option to plot 2D data as 1D line plots.
labels : boolean or list
Option to overwrite the legend labels. Must have same dimensions as
number of lines plotted.
cbar_label : str
Option to overwrite default colorbar label.
cbar_h_adjust : float
Option to adjust location of colorbar horizontally. Positive values
            move it to the right, negative values move it to the left.
secondary_y : boolean
Option to plot on secondary y axis.
y_axis_flag_meanings : boolean or int
            When set to True and plotting a state variable with flag_values and
            flag_meanings attributes, will replace y axis numerical values
            with the flag_meanings values. Set to a positive number larger than 1
            to indicate the maximum word length to use. If the text is longer than that
            value and has space characters, it will be split over multiple lines.
colorbar_labels : dict
A dictionary containing values for plotting a 2D array of state variables.
The dictionary uses data values as keys and a dictionary containing keys
'text' and 'color' for each data value to plot.
Example:
{0: {'text': 'Clear sky', 'color': 'white'},
1: {'text': 'Liquid', 'color': 'green'},
2: {'text': 'Ice', 'color': 'blue'},
3: {'text': 'Mixed phase', 'color': 'purple'}}
**kwargs : keyword arguments
The keyword arguments for :func:`plt.plot` (1D timeseries) or
:func:`plt.pcolormesh` (2D timeseries).
Returns
-------
ax : matplotlib axis handle
The matplotlib axis handle of the plot.
"""
if dsname is None and len(self._obj.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._obj.keys())[0]
if y_axis_flag_meanings:
kwargs['linestyle'] = ''
# Get data and dimensions
data = self._obj[dsname][field]
dim = list(self._obj[dsname][field].dims)
xdata = self._obj[dsname][dim[0]]
if 'units' in data.attrs:
ytitle = ''.join(['(', data.attrs['units'], ')'])
else:
ytitle = field
if cbar_label is None:
cbar_default = ytitle
if len(dim) > 1:
if use_var_for_y is None:
ydata = self._obj[dsname][dim[1]]
else:
ydata = self._obj[dsname][use_var_for_y]
ydata_dim1 = self._obj[dsname][dim[1]]
if np.shape(ydata) != np.shape(ydata_dim1):
ydata = ydata_dim1
units = ytitle
if 'units' in ydata.attrs.keys():
units = ydata.attrs['units']
ytitle = ''.join(['(', units, ')'])
else:
units = ''
ytitle = dim[1]
            # Create labels if plotting 2D data as 1D line plots
if force_line_plot is True:
if labels is True:
labels = [' '.join([str(d), units]) for d in ydata.values]
ytitle = f"({data.attrs['units']})"
ydata = None
else:
ydata = None
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
# Set up secondary y axis if requested
if secondary_y is False:
ax = self.axes[subplot_index]
else:
ax = self.axes[subplot_index].twinx()
if colorbar_labels is not None:
flag_values = list(colorbar_labels.keys())
flag_meanings = [value['text'] for key, value in colorbar_labels.items()]
cbar_colors = [value['color'] for key, value in colorbar_labels.items()]
cmap = mpl.colors.ListedColormap(cbar_colors)
for ii, flag_meaning in enumerate(flag_meanings):
if len(flag_meaning) > 20:
flag_meaning = textwrap.fill(flag_meaning, width=20)
flag_meanings[ii] = flag_meaning
else:
flag_values = None
flag_meanings = None
cbar_colors = None
if ydata is None:
            # Add in NaNs to ensure the line does not connect across data gaps.
if add_nan is True:
xdata, data = data_utils.add_in_nan(xdata, data)
if day_night_background is True:
self.day_night_background(subplot_index=subplot_index, dsname=dsname)
# If limiting data being plotted use masked arrays
# Need to do it this way because of autoscale() method
if abs_limits[0] is not None and abs_limits[1] is not None:
data = np.ma.masked_outside(data, abs_limits[0], abs_limits[1])
elif abs_limits[0] is not None and abs_limits[1] is None:
data = np.ma.masked_less_equal(data, abs_limits[0])
elif abs_limits[0] is None and abs_limits[1] is not None:
data = np.ma.masked_greater_equal(data, abs_limits[1])
# Plot the data
if 'marker' not in kwargs.keys():
kwargs['marker'] = '.'
lines = ax.plot(xdata, data, **kwargs)
# Check if we need to call legend method after plotting. This is only
# called when no assessment overplot is called.
add_legend = False
if 'label' in kwargs.keys():
add_legend = True
# Overplot failing data if requested
if assessment_overplot:
# If we are doing forced line plot from 2D data need to manage
# legend lables. Will make arrays to hold labels of QC failing
# because not set when labels not set.
if not isinstance(labels, list) and add_legend is False:
labels = []
lines = []
# For forced line plot need to plot QC behind point instead of
# on top of point.
zorder = None
if force_line_plot or overplot_behind:
zorder = 0
overplot_markersize *= 2.0
for assessment, categories in assessment_overplot_category.items():
flag_data = self._obj[dsname].qcfilter.get_masked_data(
field, rm_assessments=categories, return_inverse=True
)
if np.invert(flag_data.mask).any() and np.isfinite(flag_data).any():
try:
flag_data.mask = np.logical_or(data.mask, flag_data.mask)
except AttributeError:
pass
qc_ax = ax.plot(
xdata,
flag_data,
marker=overplot_marker,
linestyle='',
markersize=overplot_markersize,
color=assessment_overplot_category_color[assessment],
label=assessment,
zorder=zorder,
)
# If labels keyword is set need to add labels for calling legend
if isinstance(labels, list):
# If plotting forced_line_plot need to subset the Line2D object
# so we don't have more than one added to legend.
if len(qc_ax) > 1:
lines.extend(qc_ax[:1])
else:
lines.extend(qc_ax)
labels.append(assessment)
add_legend = True
# Add legend if labels are available
if isinstance(labels, list):
ax.legend(lines, labels)
elif add_legend:
ax.legend()
# Change y axis to text from flag_meanings if requested.
if y_axis_flag_meanings:
flag_meanings = self._obj[dsname][field].attrs['flag_meanings']
flag_values = self._obj[dsname][field].attrs['flag_values']
# If keyword is larger than 1 assume this is the maximum character length
# desired and insert returns to wrap text.
if y_axis_flag_meanings > 1:
for ii, flag_meaning in enumerate(flag_meanings):
if len(flag_meaning) > y_axis_flag_meanings:
flag_meaning = textwrap.fill(flag_meaning, width=y_axis_flag_meanings)
flag_meanings[ii] = flag_meaning
ax.set_yticks(flag_values)
ax.set_yticklabels(flag_meanings)
else:
# Add in nans to ensure the data are not streaking
if add_nan is True:
xdata, data = data_utils.add_in_nan(xdata, data)
            # Sets shading parameter to auto. Matplotlib will check dimensions.
            # If X, Y and C have the same dimensions shading is set to nearest.
            # If X and Y dimensions are 1 greater than C shading is set to flat.
if 'edgecolors' not in kwargs.keys():
kwargs['edgecolors'] = 'face'
mesh = ax.pcolormesh(
np.asarray(xdata),
ydata,
data.transpose(),
shading=set_shading,
cmap=cmap,
**kwargs,
)
# Set Title
if set_title is None:
if isinstance(self._obj[dsname].time.values[0], np.datetime64):
set_title = ' '.join(
[
dsname,
field,
'on',
dt_utils.numpy_to_arm_date(self._obj[dsname].time.values[0]),
]
)
else:
date_result = search(
r'\d{4}-\d{1,2}-\d{1,2}', self._obj[dsname].time.attrs['units']
)
if date_result is not None:
set_title = ' '.join([dsname, field, 'on', date_result.group(0)])
else:
set_title = ' '.join([dsname, field])
if secondary_y is False:
ax.set_title(set_title)
# Set YTitle
if not y_axis_flag_meanings:
ax.set_ylabel(ytitle)
# Set X Limit - We want the same time axes for all subplots
if not hasattr(self, 'time_rng'):
if time_rng is not None:
self.time_rng = list(time_rng)
else:
self.time_rng = [xdata.min().values, xdata.max().values]
self.set_xrng(self.time_rng, subplot_index)
# Set Y Limit
if y_rng is not None:
self.set_yrng(y_rng)
if hasattr(self, 'yrng'):
# Make sure that the yrng is not just the default
if ydata is None:
if abs_limits[0] is not None or abs_limits[1] is not None:
our_data = data
else:
our_data = data.values
else:
our_data = ydata
finite = np.isfinite(our_data)
if finite.any():
our_data = our_data[finite]
if invert_y_axis is False:
yrng = [np.min(our_data), np.max(our_data)]
else:
yrng = [np.max(our_data), np.min(our_data)]
else:
yrng = [0, 1]
# Check if current range is outside of new range and only set
# values that work for all data plotted.
if isinstance(yrng[0], np.datetime64):
yrng = mdates.datestr2num([str(yrng[0]), str(yrng[1])])
current_yrng = ax.get_ylim()
if invert_y_axis is False:
if yrng[0] > current_yrng[0]:
yrng[0] = current_yrng[0]
if yrng[1] < current_yrng[1]:
yrng[1] = current_yrng[1]
else:
if yrng[0] < current_yrng[0]:
yrng[0] = current_yrng[0]
if yrng[1] > current_yrng[1]:
yrng[1] = current_yrng[1]
# Set y range the normal way if not secondary y
# If secondary, just use set_ylim
if secondary_y is False:
self.set_yrng(yrng, subplot_index)
else:
ax.set_ylim(yrng)
# Set X Format
if len(subplot_index) == 1:
days = self.xrng[subplot_index, 1] - self.xrng[subplot_index, 0]
else:
days = self.xrng[subplot_index][1] - self.xrng[subplot_index][0]
myFmt = common.get_date_format(days)
ax.xaxis.set_major_formatter(myFmt)
# Set X format - We want the same time axes for all subplots
if not hasattr(self, 'time_fmt'):
self.time_fmt = myFmt
# Put on an xlabel, but only if we are making the bottom-most plot
if subplot_index[0] == self.axes.shape[0] - 1:
ax.set_xlabel('Time [UTC]')
if ydata is not None:
if cbar_label is None:
cbar_title = cbar_default
else:
cbar_title = ''.join(['(', cbar_label, ')'])
if colorbar_labels is not None:
cbar_title = None
cbar = self.add_colorbar(
mesh,
title=cbar_title,
subplot_index=subplot_index,
values=flag_values,
pad=cbar_h_adjust,
)
cbar.set_ticks(flag_values)
cbar.set_ticklabels(flag_meanings)
cbar.ax.tick_params(labelsize=10)
else:
self.add_colorbar(
mesh, title=cbar_title, subplot_index=subplot_index, pad=cbar_h_adjust
)
return ax
def plot_barbs_from_spd_dir(
self, speed_field, direction_field, pres_field=None, dsname=None, **kwargs
):
"""
This procedure will make a wind barb plot timeseries.
If a pressure field is given and the wind fields are 1D, which, for
example, would occur if one wants to plot a timeseries of
rawinsonde data, then a time-height cross section of
winds will be made.
Note: This procedure calls plot_barbs_from_u_v and will take in the
same keyword arguments as that procedure.
Parameters
----------
speed_field : str
The name of the field specifying the wind speed in m/s.
direction_field : str
The name of the field specifying the wind direction in degrees.
0 degrees is defined to be north and increases clockwise like
what is used in standard meteorological notation.
pres_field : str
The name of the field specifying pressure or height. If using
height coordinates, then we recommend setting invert_y_axis
to False.
dsname : str
The name of the datastream to plot. Setting to None will make
ACT attempt to autodetect this.
kwargs : dict
Any additional keyword arguments will be passed into
:func:`act.plotting.TimeSeriesDisplay.plot_barbs_from_u_and_v`.
Returns
-------
the_ax : matplotlib axis handle
The handle to the axis where the plot was made on.
Examples
--------
.. code-block:: python
sonde_ds = act.io.armfiles.read_netcdf(
act.tests.sample_files.EXAMPLE_TWP_SONDE_WILDCARD)
BarbDisplay = act.plotting.TimeSeriesDisplay(
{'sonde_darwin': sonde_ds}, figsize=(10,5))
BarbDisplay.plot_barbs_from_spd_dir('wspd', 'deg', 'pres',
num_barbs_x=20)
"""
if dsname is None and len(self._obj.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._obj.keys())[0]
# Make temporary field called tempu, tempv
spd = self._obj[dsname][speed_field]
dir = self._obj[dsname][direction_field]
tempu = -np.sin(np.deg2rad(dir)) * spd
tempv = -np.cos(np.deg2rad(dir)) * spd
self._obj[dsname]['temp_u'] = deepcopy(self._obj[dsname][speed_field])
self._obj[dsname]['temp_v'] = deepcopy(self._obj[dsname][speed_field])
self._obj[dsname]['temp_u'].values = tempu
self._obj[dsname]['temp_v'].values = tempv
the_ax = self.plot_barbs_from_u_v('temp_u', 'temp_v', pres_field, dsname, **kwargs)
del self._obj[dsname]['temp_u'], self._obj[dsname]['temp_v']
return the_ax
def plot_barbs_from_u_v(
self,
u_field,
v_field,
pres_field=None,
dsname=None,
subplot_index=(0,),
set_title=None,
day_night_background=False,
invert_y_axis=True,
num_barbs_x=20,
num_barbs_y=20,
use_var_for_y=None,
**kwargs,
):
"""
This function will plot a wind barb timeseries from u and v wind
data. If pres_field is given, a time-height series will be plotted
from 1-D wind data.
Parameters
----------
u_field : str
The name of the field containing the U component of the wind.
v_field : str
The name of the field containing the V component of the wind.
pres_field : str or None
The name of the field containing the pressure or height. Set
to None to not use this.
dsname : str or None
The name of the datastream to plot. Setting to None will make
ACT automatically try to determine this.
subplot_index : 2-tuple
The index of the subplot to make the plot on.
set_title : str or None
The title of the plot.
day_night_background : bool
Set to True to plot a day/night background.
invert_y_axis : bool
Set to True to invert the y axis (i.e. for plotting pressure as
the height coordinate).
num_barbs_x : int
The number of wind barbs to plot in the x axis.
num_barbs_y : int
The number of wind barbs to plot in the y axis.
cmap : matplotlib.colors.LinearSegmentedColormap
A color map to use with the wind barbs. If set, the plt.barbs routine
will be passed a C parameter equal to the wind speed (the square root
of the sum of the squares of u and v) and colored with this color map.
A colorbar will also be added; its limits can be set with 'clim'.
Setting this changes the wind barbs from black to colors.
use_var_for_y : str
Set this to the name of a data variable in the Dataset to use as the
y-axis variable instead of the default dimension. Useful for instances
where data has an index-based dimension instead of a height-based
dimension. If shapes of arrays do not match it will automatically
revert back to the original ydata.
**kwargs : keyword arguments
Additional keyword arguments will be passed into plt.barbs.
Returns
-------
ax : matplotlib axis handle
The axis handle that contains the reference to the
constructed plot.
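Examples
--------
A minimal sketch of plotting barbs from u/v components. The variable
names 'u_wind', 'v_wind' and 'pres' and the sonde_files path are
placeholders and may differ for your datastream:
.. code-block:: python
import act
sonde_ds = act.io.armfiles.read_netcdf(sonde_files)
display = act.plotting.TimeSeriesDisplay({'sonde': sonde_ds}, figsize=(10, 5))
display.plot_barbs_from_u_v('u_wind', 'v_wind', 'pres', num_barbs_x=20)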
"""
if dsname is None and len(self._obj.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._obj.keys())[0]
# Get data and dimensions
u = self._obj[dsname][u_field].values
v = self._obj[dsname][v_field].values
dim = list(self._obj[dsname][u_field].dims)
xdata = self._obj[dsname][dim[0]].values
num_x = xdata.shape[-1]
barb_step_x = round(num_x / num_barbs_x)
if barb_step_x == 0:
barb_step_x = 1
if len(dim) > 1 and pres_field is None:
if use_var_for_y is None:
ydata = self._obj[dsname][dim[1]]
else:
ydata = self._obj[dsname][use_var_for_y]
ydata_dim1 = self._obj[dsname][dim[1]]
if np.shape(ydata) != np.shape(ydata_dim1):
ydata = ydata_dim1
if 'units' in ydata.attrs:
units = ydata.attrs['units']
else:
units = ''
ytitle = ''.join(['(', units, ')'])
num_y = ydata.shape[0]
barb_step_y = round(num_y / num_barbs_y)
if barb_step_y == 0:
barb_step_y = 1
xdata, ydata = np.meshgrid(xdata, ydata, indexing='ij')
elif pres_field is not None:
# Do a nearest-neighbor interpolation for each member of the series.
# Coordinates are time, pressure
pres = self._obj[dsname][pres_field]
u_interp = NearestNDInterpolator((xdata, pres.values), u, rescale=True)
v_interp = NearestNDInterpolator((xdata, pres.values), v, rescale=True)
barb_step_x = 1
barb_step_y = 1
x_times = pd.date_range(xdata.min(), xdata.max(), periods=num_barbs_x)
if num_barbs_y == 1:
y_levels = pres.mean()
else:
y_levels = np.linspace(np.nanmin(pres), np.nanmax(pres), num_barbs_y)
xdata, ydata = np.meshgrid(x_times, y_levels, indexing='ij')
u = u_interp(xdata, ydata)
v = v_interp(xdata, ydata)
if 'units' in pres.attrs:
units = pres.attrs['units']
else:
units = ''
ytitle = ''.join(['(', units, ')'])
else:
ydata = None
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
if ydata is None:
ydata = np.ones(xdata.shape)
if 'cmap' in kwargs.keys():
map_color = np.sqrt(np.power(u[::barb_step_x], 2) + np.power(v[::barb_step_x], 2))
map_color[np.isnan(map_color)] = 0
ax = self.axes[subplot_index].barbs(
xdata[::barb_step_x],
ydata[::barb_step_x],
u[::barb_step_x],
v[::barb_step_x],
map_color,
**kwargs,
)
plt.colorbar(
ax,
ax=[self.axes[subplot_index]],
label='Wind Speed (' + self._obj[dsname][u_field].attrs['units'] + ')',
)
else:
self.axes[subplot_index].barbs(
xdata[::barb_step_x],
ydata[::barb_step_x],
u[::barb_step_x],
v[::barb_step_x],
**kwargs,
)
self.axes[subplot_index].set_yticks([])
else:
if 'cmap' in kwargs.keys():
map_color = np.sqrt(
np.power(u[::barb_step_x, ::barb_step_y], 2)
+ np.power(v[::barb_step_x, ::barb_step_y], 2)
)
map_color[np.isnan(map_color)] = 0
ax = self.axes[subplot_index].barbs(
xdata[::barb_step_x, ::barb_step_y],
ydata[::barb_step_x, ::barb_step_y],
u[::barb_step_x, ::barb_step_y],
v[::barb_step_x, ::barb_step_y],
map_color,
**kwargs,
)
plt.colorbar(
ax,
ax=[self.axes[subplot_index]],
label='Wind Speed (' + self._obj[dsname][u_field].attrs['units'] + ')',
)
else:
ax = self.axes[subplot_index].barbs(
xdata[::barb_step_x, ::barb_step_y],
ydata[::barb_step_x, ::barb_step_y],
u[::barb_step_x, ::barb_step_y],
v[::barb_step_x, ::barb_step_y],
**kwargs,
)
if day_night_background is True:
self.day_night_background(subplot_index=subplot_index, dsname=dsname)
# Set Title
if set_title is None:
set_title = ' '.join(
[
dsname,
'on',
dt_utils.numpy_to_arm_date(self._obj[dsname].time.values[0]),
]
)
self.axes[subplot_index].set_title(set_title)
# Set YTitle
if 'ytitle' in locals():
self.axes[subplot_index].set_ylabel(ytitle)
# Set X Limit - We want the same time axes for all subplots
time_rng = [xdata.min(), xdata.max()]
self.set_xrng(time_rng, subplot_index)
# Set Y Limit
if hasattr(self, 'yrng'):
# Make sure that the yrng is not just the default
if not np.all(self.yrng[subplot_index] == 0):
self.set_yrng(self.yrng[subplot_index], subplot_index)
else:
if ydata is None:
our_data = xdata
else:
our_data = ydata
if np.isfinite(our_data).any():
if invert_y_axis is False:
yrng = [np.nanmin(our_data), np.nanmax(our_data)]
else:
yrng = [np.nanmax(our_data), np.nanmin(our_data)]
else:
yrng = [0, 1]
self.set_yrng(yrng, subplot_index)
# Set X Format
if len(subplot_index) == 1:
days = self.xrng[subplot_index, 1] - self.xrng[subplot_index, 0]
else:
days = (
self.xrng[subplot_index[0], subplot_index[1], 1]
- self.xrng[subplot_index[0], subplot_index[1], 0]
)
# Put on an xlabel, but only if we are making the bottom-most plot
if subplot_index[0] == self.axes.shape[0] - 1:
self.axes[subplot_index].set_xlabel('Time [UTC]')
myFmt = common.get_date_format(days)
self.axes[subplot_index].xaxis.set_major_formatter(myFmt)
return self.axes[subplot_index]
def plot_time_height_xsection_from_1d_data(
self,
data_field,
pres_field,
dsname=None,
subplot_index=(0,),
set_title=None,
day_night_background=False,
num_time_periods=20,
num_y_levels=20,
invert_y_axis=True,
cbar_label=None,
set_shading='auto',
**kwargs,
):
"""
This will plot a time-height cross section from 1D datasets using
nearest neighbor interpolation on a regular time by height grid.
All that is needed are a data variable and a height variable.
Parameters
----------
data_field : str
The name of the field to plot.
pres_field : str
The name of the height or pressure field to plot.
dsname : str or None
The name of the datastream to plot
subplot_index : 2-tuple
The index of the subplot to create the plot on.
set_title : str or None
The title of the plot.
day_night_background : bool
Set to true to plot the day/night background.
num_time_periods : int
Set to determine how many time periods. Setting to None
will do one time period per day.
num_y_levels : int
The number of levels in the y axis to use.
invert_y_axis : bool
Set to true to invert the y-axis (recommended for
pressure coordinates).
cbar_label : str
Option to overwrite default colorbar label.
set_shading : string
Option to set the matplotlib.pcolormesh shading parameter.
Defaults to 'auto'.
**kwargs : keyword arguments
Additional keyword arguments will be passed
into :func:`plt.pcolormesh`
Returns
-------
ax : matplotlib axis handle
The matplotlib axis handle pointing to the plot.
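Examples
--------
A minimal sketch building a time-height cross section from 1-D sounding
data; 'tdry', 'pres' and the sonde_files path are placeholders:
.. code-block:: python
import act
ds = act.io.armfiles.read_netcdf(sonde_files)
display = act.plotting.TimeSeriesDisplay({'sonde': ds}, figsize=(8, 6))
display.plot_time_height_xsection_from_1d_data(
'tdry', 'pres', num_time_periods=25, cmap='coolwarm')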
"""
if dsname is None and len(self._obj.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._obj.keys())[0]
dim = list(self._obj[dsname][data_field].dims)
if len(dim) > 1:
raise ValueError(
'plot_time_height_xsection_from_1d_data only '
'supports 1-D datasets. For datasets with 2 or '
'more dimensions use plot().'
)
# Get data and dimensions
data = self._obj[dsname][data_field].values
xdata = self._obj[dsname][dim[0]].values
# Do a nearest-neighbor interpolation for each member of the series.
# Coordinates are time, pressure
pres = self._obj[dsname][pres_field]
u_interp = NearestNDInterpolator((xdata, pres.values), data, rescale=True)
# Mask points where we have no data
# Count number of unique days
x_times = pd.date_range(xdata.min(), xdata.max(), periods=num_time_periods)
y_levels = np.linspace(np.nanmin(pres), np.nanmax(pres), num_y_levels)
tdata, ydata = np.meshgrid(x_times, y_levels, indexing='ij')
data = u_interp(tdata, ydata)
ytitle = ''.join(['(', pres.attrs['units'], ')'])
units = data_field + ' (' + self._obj[dsname][data_field].attrs['units'] + ')'
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
mesh = self.axes[subplot_index].pcolormesh(
x_times, y_levels, np.transpose(data), shading=set_shading, **kwargs
)
if day_night_background is True:
self.day_night_background(subplot_index=subplot_index, dsname=dsname)
# Set Title
if set_title is None:
set_title = ' '.join(
[
dsname,
'on',
dt_utils.numpy_to_arm_date(self._obj[dsname].time.values[0]),
]
)
self.axes[subplot_index].set_title(set_title)
# Set YTitle
if 'ytitle' in locals():
self.axes[subplot_index].set_ylabel(ytitle)
# Set X Limit - We want the same time axes for all subplots
time_rng = [x_times[0], x_times[-1]]
self.set_xrng(time_rng, subplot_index)
# Set Y Limit
if hasattr(self, 'yrng'):
# Make sure that the yrng is not just the default
if not np.all(self.yrng[subplot_index] == 0):
self.set_yrng(self.yrng[subplot_index], subplot_index)
else:
if ydata is None:
our_data = data.values
else:
our_data = ydata
if np.isfinite(our_data).any():
if invert_y_axis is False:
yrng = [np.nanmin(our_data), np.nanmax(our_data)]
else:
yrng = [np.nanmax(our_data), np.nanmin(our_data)]
else:
yrng = [0, 1]
self.set_yrng(yrng, subplot_index)
# Set X Format
if len(subplot_index) == 1:
days = self.xrng[subplot_index, 1] - self.xrng[subplot_index, 0]
else:
days = (
self.xrng[subplot_index[0], subplot_index[1], 1]
- self.xrng[subplot_index[0], subplot_index[1], 0]
)
# Put on an xlabel, but only if we are making the bottom-most plot
if subplot_index[0] == self.axes.shape[0] - 1:
self.axes[subplot_index].set_xlabel('Time [UTC]')
if ydata is not None:
if cbar_label is None:
self.add_colorbar(mesh, title=units, subplot_index=subplot_index)
else:
self.add_colorbar(mesh, title=cbar_label, subplot_index=subplot_index)
myFmt = common.get_date_format(days)
self.axes[subplot_index].xaxis.set_major_formatter(myFmt)
return self.axes[subplot_index]
def time_height_scatter(
self,
data_field=None,
dsname=None,
cmap='rainbow',
alt_label=None,
alt_field='alt',
cb_label=None,
**kwargs,
):
"""
Create a time series plot of altitude and data variable with
color also indicating value with a colorbar. The colorbar is
positioned to serve both as the indicator of the color intensity
and as the second y-axis.
Parameters
----------
data_field : str
Name of data field in the object to plot on second y-axis.
dsname : str or None
The name of the datastream to plot.
cmap : str
Colorbar color map to use.
alt_label : str
Altitude first y-axis label to use. If None, will try to use
long_name and units.
alt_field : str
Name of the altitude field in the object to plot on the first y-axis.
cb_label : str
Colorbar label to use. If not set will try to use
long_name and units.
**kwargs : keyword arguments
Any other keyword arguments that will be passed
into the TimeSeriesDisplay.plot method when the figure
is made.
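Examples
--------
A minimal sketch assuming a dataset that contains an 'alt' variable and
a 'temperature' variable; both names and the aircraft_file path are
placeholders:
.. code-block:: python
import act
ds = act.io.armfiles.read_netcdf(aircraft_file)
display = act.plotting.TimeSeriesDisplay({'aaf': ds}, figsize=(8, 6))
display.time_height_scatter('temperature')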
"""
if dsname is None and len(self._obj.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._obj.keys())[0]
# Get data and dimensions
data = self._obj[dsname][data_field]
altitude = self._obj[dsname][alt_field]
dim = list(self._obj[dsname][data_field].dims)
xdata = self._obj[dsname][dim[0]]
if alt_label is None:
try:
alt_label = altitude.attrs['long_name'] + ''.join(
[' (', altitude.attrs['units'], ')']
)
except KeyError:
alt_label = alt_field
if cb_label is None:
try:
cb_label = data.attrs['long_name'] + ''.join([' (', data.attrs['units'], ')'])
except KeyError:
cb_label = data_field
colorbar_map = plt.cm.get_cmap(cmap)
self.fig.subplots_adjust(left=0.1, right=0.86, bottom=0.16, top=0.91)
ax1 = self.plot(alt_field, color='black', **kwargs)
ax1.set_ylabel(alt_label)
ax2 = ax1.twinx()
sc = ax2.scatter(xdata.values, data.values, c=data.values, marker='.', cmap=colorbar_map)
cbaxes = self.fig.add_axes(
[
self.fig.subplotpars.right + 0.02,
self.fig.subplotpars.bottom,
0.02,
self.fig.subplotpars.top - self.fig.subplotpars.bottom,
]
)
cbar = plt.colorbar(sc, cax=cbaxes)
ax2.set_ylim(cbar.mappable.get_clim())
cbar.ax.set_ylabel(cb_label)
ax2.set_yticklabels([])
return self.axes[0]
def qc_flag_block_plot(
self,
data_field=None,
dsname=None,
subplot_index=(0,),
time_rng=None,
assessment_color=None,
edgecolor='face',
set_shading='auto',
**kwargs,
):
"""
Create a time series plot of embedded quality control values
using broken barh plotting.
Parameters
----------
data_field : str
Name of data field in the object to plot corresponding quality
control.
dsname : None or str
If there is more than one datastream in the display object the
name of the datastream needs to be specified. If set to None and
there is only one datastream ACT will use the sole datastream
in the object.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to plot the QC flag data on.
time_rng : tuple or list
List or tuple with (min, max) values to set the x-axis range limits.
assessment_color : dict
Dictionary lookup to override the default assessment-to-color mapping. Make sure
the assessment name matches the case used in the file.
edgecolor : str or list
Color name, list of color names or 'face' as defined in matplotlib.axes.Axes.broken_barh
set_shading : string
Option to set the matplotlib.pcolormesh shading parameter.
Defaults to 'auto'.
**kwargs : keyword arguments
The keyword arguments for :func:`plt.broken_barh`.
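Examples
--------
A minimal sketch; 'temp_mean' and the met_files path are placeholders,
and the dataset is assumed to carry embedded quality control that
cleanup_qc=True standardizes on read:
.. code-block:: python
import act
ds = act.io.armfiles.read_netcdf(met_files, cleanup_qc=True)
display = act.plotting.TimeSeriesDisplay({'met': ds}, figsize=(12, 3))
display.qc_flag_block_plot('temp_mean', dsname='met')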
"""
# Color to plot associated with assessment.
color_lookup = {
'Bad': 'red',
'Incorrect': 'red',
'Indeterminate': 'orange',
'Suspect': 'orange',
'Missing': 'darkgray',
'Not Failing': 'green',
'Acceptable': 'green',
}
if assessment_color is not None:
for asses, color in assessment_color.items():
color_lookup[asses] = color
if asses == 'Incorrect':
color_lookup['Bad'] = color
if asses == 'Suspect':
color_lookup['Indeterminate'] = color
# Set up list of test names to use for missing values
missing_val_long_names = [
'Value equal to missing_value*',
'Value set to missing_value*',
'Value is equal to missing_value*',
'Value is set to missing_value*',
]
if dsname is None and len(self._obj.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._obj.keys())[0]
# Set up or get current plot figure
if self.fig is None:
self.fig = plt.figure()
# Set up or get current axes
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
ax = self.axes[subplot_index]
# Set X Limit - We want the same time axes for all subplots
data = self._obj[dsname][data_field]
dim = list(self._obj[dsname][data_field].dims)
xdata = self._obj[dsname][dim[0]]
# Get data and attributes
qc_data_field = self._obj[dsname].qcfilter.check_for_ancillary_qc(
data_field, add_if_missing=False, cleanup=False
)
if qc_data_field is None:
raise ValueError(f'No quality control ancillary variable in Dataset for {data_field}')
flag_masks = self._obj[dsname][qc_data_field].attrs['flag_masks']
flag_meanings = self._obj[dsname][qc_data_field].attrs['flag_meanings']
flag_assessments = self._obj[dsname][qc_data_field].attrs['flag_assessments']
# Get time ranges for green blocks
time_delta = determine_time_delta(xdata.values)
barh_list_green = reduce_time_ranges(xdata.values, time_delta=time_delta, broken_barh=True)
# Set background to gray indicating not available data
ax.set_facecolor('dimgray')
# Check if plotting 2D data vs 1D data. 2D data will be summarized by
# assessment category instead of showing each test.
data_shape = self._obj[dsname][qc_data_field].shape
if len(data_shape) > 1:
cur_assessments = list(set(flag_assessments))
cur_assessments.sort()
cur_assessments.reverse()
qc_data = np.full(data_shape, -1, dtype=np.int16)
plot_colors = []
tick_names = []
index = self._obj[dsname][qc_data_field].values == 0
if index.any():
qc_data[index] = 0
plot_colors.append(color_lookup['Not Failing'])
tick_names.append('Not Failing')
for ii, assess in enumerate(cur_assessments):
if assess not in color_lookup:
color_lookup[assess] = list(mplcolors.CSS4_COLORS.keys())[ii]
ii += 1
assess_data = self._obj[dsname].qcfilter.get_masked_data(
data_field, rm_assessments=assess
)
if assess_data.mask.any():
qc_data[assess_data.mask] = ii
plot_colors.append(color_lookup[assess])
tick_names.append(assess)
# Overwrite missing data. This is debatable because VAPs set the value to
# missing but set the test assessment to Bad. Try to work around that by
# looking for test descriptions that only indicate the values are missing,
# not that they were set to missing by a failing test.
missing_test_nums = []
for ii, flag_meaning in enumerate(flag_meanings):
# Check if the bit set is indicating missing data.
for val in missing_val_long_names:
if re_search(val, flag_meaning):
test_num = parse_bit(flag_masks[ii])[0]
missing_test_nums.append(test_num)
assess_data = self._obj[dsname].qcfilter.get_masked_data(
data_field, rm_tests=missing_test_nums
)
if assess_data.mask.any():
qc_data[assess_data.mask] = -1
plot_colors.append(color_lookup['Missing'])
tick_names.append('Missing')
# Create a masked array to allow not plotting where values are missing
qc_data = np.ma.masked_equal(qc_data, -1)
dims = self._obj[dsname][qc_data_field].dims
xvalues = self._obj[dsname][dims[0]].values
yvalues = self._obj[dsname][dims[1]].values
cMap = mplcolors.ListedColormap(plot_colors)
mesh = ax.pcolormesh(
xvalues,
yvalues,
np.transpose(qc_data),
cmap=cMap,
vmin=0,
shading=set_shading,
)
divider = make_axes_locatable(ax)
# Determine correct placement of words on colorbar
tick_nums = (
np.arange(0, len(tick_names) * 2 + 1) / (len(tick_names) * 2) * np.nanmax(qc_data)
)[1::2]
cax = divider.append_axes('bottom', size='5%', pad=0.3)
cbar = self.fig.colorbar(
mesh,
cax=cax,
orientation='horizontal',
spacing='uniform',
ticks=tick_nums,
shrink=0.5,
)
cbar.ax.set_xticklabels(tick_names)
# Set YTitle
dim_name = list(set(self._obj[dsname][qc_data_field].dims) - {'time'})
try:
ytitle = f"{dim_name[0]} ({self._obj[dsname][dim_name[0]].attrs['units']})"
ax.set_ylabel(ytitle)
except KeyError:
pass
# Add which tests were set as text to the plot
unique_values = []
for ii in np.unique(self._obj[dsname][qc_data_field].values):
unique_values.extend(parse_bit(ii))
if len(unique_values) > 0:
unique_values = list(set(unique_values))
unique_values.sort()
unique_values = [str(ii) for ii in unique_values]
self.fig.text(
0.5,
-0.35,
f"QC Tests Tripped: {', '.join(unique_values)}",
transform=ax.transAxes,
horizontalalignment='center',
verticalalignment='center',
fontweight='bold',
)
else:
test_nums = []
for ii, assess in enumerate(flag_assessments):
if assess not in color_lookup:
color_lookup[assess] = list(mplcolors.CSS4_COLORS.keys())[ii]
# Plot green data first.
ax.broken_barh(
barh_list_green,
(ii, ii + 1),
facecolors=color_lookup['Not Failing'],
edgecolor=edgecolor,
**kwargs,
)
# Get test number from flag_mask bitpacked number
test_nums.append(parse_bit(flag_masks[ii]))
# Get masked array data to use mask for finding if/where test is set
data = self._obj[dsname].qcfilter.get_masked_data(
data_field, rm_tests=test_nums[-1]
)
if np.any(data.mask):
# Get time ranges from time and masked data
barh_list = reduce_time_ranges(
xdata.values[data.mask], time_delta=time_delta, broken_barh=True
)
# Check if the bit set is indicating missing data. If so change
# to different plotting color than what is in flag_assessments.
for val in missing_val_long_names:
if re_search(val, flag_meanings[ii]):
assess = 'Missing'
break
# Lay down blocks of tripped tests using correct color
ax.broken_barh(
barh_list,
(ii, ii + 1),
facecolors=color_lookup[assess],
edgecolor=edgecolor,
**kwargs,
)
# Add test description to plot.
ax.text(xdata.values[0], ii + 0.5, ' ' + flag_meanings[ii], va='center')
# Change y ticks to test number
plt.yticks(
[ii + 0.5 for ii in range(0, len(test_nums))],
labels=['Test ' + str(ii[0]) for ii in test_nums],
)
# Set ylimit to number of tests plotted
ax.set_ylim(0, len(flag_assessments))
# Set X Limit - We want the same time axes for all subplots
if not hasattr(self, 'time_rng'):
if time_rng is not None:
self.time_rng = list(time_rng)
else:
self.time_rng = [xdata.min().values, xdata.max().values]
self.set_xrng(self.time_rng, subplot_index)
# Get X format - We want the same time axes for all subplots
if hasattr(self, 'time_fmt'):
ax.xaxis.set_major_formatter(self.time_fmt)
else:
# Set X Format
if len(subplot_index) == 1:
days = self.xrng[subplot_index, 1] - self.xrng[subplot_index, 0]
else:
days = (
self.xrng[subplot_index[0], subplot_index[1], 1]
- self.xrng[subplot_index[0], subplot_index[1], 0]
)
myFmt = common.get_date_format(days)
ax.xaxis.set_major_formatter(myFmt)
self.time_fmt = myFmt
return self.axes[subplot_index]
def fill_between(
self,
field,
dsname=None,
subplot_index=(0,),
set_title=None,
secondary_y=False,
**kwargs,
):
"""
Makes a fill_between plot based on matplotlib.
Parameters
----------
field : str
The name of the field to plot.
dsname : None or str
If there is more than one datastream in the display object the
name of the datastream needs to be specified. If set to None and
there is only one datastream ACT will use the sole datastream
in the object.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to plot on.
set_title : str
The title for the plot.
secondary_y : boolean
Option to indicate if the data should be plotted on second y-axis.
**kwargs : keyword arguments
The keyword arguments for :func:`plt.fill_between`.
Returns
-------
ax : matplotlib axis handle
The matplotlib axis handle of the plot.
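Examples
--------
A minimal sketch shading under a cumulative precipitation trace on a
secondary y-axis; 'temp_mean', 'precip_accum' and the met_files path
are placeholders:
.. code-block:: python
import act
ds = act.io.armfiles.read_netcdf(met_files)
display = act.plotting.TimeSeriesDisplay({'met': ds})
display.plot('temp_mean', dsname='met')
display.fill_between('precip_accum', dsname='met', secondary_y=True, alpha=0.5)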
"""
if dsname is None and len(self._obj.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._obj.keys())[0]
# Get data and dimensions
data = self._obj[dsname][field]
dim = list(self._obj[dsname][field].dims)
xdata = self._obj[dsname][dim[0]]
if 'units' in data.attrs:
ytitle = ''.join(['(', data.attrs['units'], ')'])
else:
ytitle = field
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
# Set ax to appropriate axis
if secondary_y is False:
ax = self.axes[subplot_index]
else:
ax = self.axes[subplot_index].twinx()
ax.fill_between(xdata.values, data, **kwargs)
# Set X Format
if len(subplot_index) == 1:
days = self.xrng[subplot_index, 1] - self.xrng[subplot_index, 0]
else:
days = (
self.xrng[subplot_index[0], subplot_index[1], 1]
- self.xrng[subplot_index[0], subplot_index[1], 0]
)
myFmt = common.get_date_format(days)
ax.xaxis.set_major_formatter(myFmt)
# Set X format - We want the same time axes for all subplots
if not hasattr(self, 'time_fmt'):
self.time_fmt = myFmt
# Put on an xlabel, but only if we are making the bottom-most plot
if subplot_index[0] == self.axes.shape[0] - 1:
self.axes[subplot_index].set_xlabel('Time [UTC]')
# Set YTitle
ax.set_ylabel(ytitle)
# Set Title
if set_title is None:
set_title = ' '.join(
[
dsname,
field,
'on',
dt_utils.numpy_to_arm_date(self._obj[dsname].time.values[0]),
]
)
if secondary_y is False:
ax.set_title(set_title)
return self.axes[subplot_index]
<|code_end|>
|
act/io/__init__.py
<|code_start|>"""
This module contains procedures for reading and writing various ARM datasets.
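Submodules and their public functions are attached lazily, so, for
example, act.io.read_netcdf or act.io.armfiles.read_netcdf can be used
without importing every reader up front. A minimal sketch (the file
path is a placeholder):
.. code-block:: python
import act
ds = act.io.read_netcdf(my_arm_file)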
"""
import lazy_loader as lazy
__getattr__, __dir__, __all__ = lazy.attach(
__name__,
submodules=['armfiles', 'csvfiles', 'icartt', 'mpl', 'noaagml', 'noaapsl', 'pysp2'],
submod_attrs={
'armfiles': [
'WriteDataset',
'check_arm_standards',
'create_obj_from_arm_dod',
'read_netcdf',
'read_mmcr',
],
'csvfiles': ['read_csv'],
'icartt': ['read_icartt'],
'mpl': ['proc_sigma_mplv5_read', 'read_sigma_mplv5'],
'noaagml': [
'read_gml',
'read_gml_co2',
'read_gml_halo',
'read_gml_met',
'read_gml_ozone',
'read_gml_radiation',
],
'noaapsl': [
'read_psl_wind_profiler',
'read_psl_wind_profiler_temperature',
'read_psl_parsivel',
'read_psl_radar_fmcw_moment',
],
'pysp2': ['read_hk_file', 'read_sp2', 'read_sp2_dat'],
},
)
<|code_end|>
act/io/armfiles.py
<|code_start|>"""
This module contains I/O operations for loading files that were created for the
Atmospheric Radiation Measurement program supported by the Department of Energy
Office of Science.
"""
import copy
import glob
import json
import re
import urllib
import warnings
from pathlib import Path, PosixPath
from netCDF4 import Dataset
import numpy as np
import xarray as xr
import act.utils as utils
from act.config import DEFAULT_DATASTREAM_NAME
def read_netcdf(
filenames,
concat_dim=None,
return_None=False,
combine='by_coords',
use_cftime=True,
cftime_to_datetime64=True,
combine_attrs='override',
cleanup_qc=False,
keep_variables=None,
**kwargs,
):
"""
Returns `xarray.Dataset` with stored data and metadata from a user-defined
query of ARM-standard netCDF files from a single datastream. Has some procedures
to ensure time is correctly formatted in the returned Dataset.
Parameters
----------
filenames : str, pathlib.PosixPath or list of str
Name of file(s) to read.
concat_dim : str
Dimension to concatenate files along. Default value is 'None'.
return_None : bool, optional
Catch IOError exception when file not found and return None.
Default is False.
combine : str
String used by xarray.open_mfdataset() to determine how to combine
data files into one Dataset. See Xarray documentation for options.
use_cftime : boolean
Option to use cftime library to parse the time units string and correctly
establish the time values with a units string containing timezone offset.
This will return the time in cftime format. See cftime_to_datetime64 if
you don't want to convert the times in the xarray dataset from cftime to numpy datetime64.
cftime_to_datetime64 : boolean
If time is stored as cftime in xarray dataset convert to numpy datetime64. If time
precision required is sub-millisecond set decode_times=False but leave
cftime_to_datetime64=True. This will force it to use base_time and time_offset
to set time.
combine_attrs : str
String indicating how to combine attrs of the objects being merged
cleanup_qc : boolean
Call clean.cleanup() method to convert to standardized ancillary quality control
variables. This will not allow any keyword options, so if non-default behavior is
desired will need to call clean.cleanup() method on the object after reading the data.
keep_variables : str or list of str
Variable names to read from data file. Works by creating a list of variable names
to exclude from reading and passing into open_mfdataset() via drop_variables keyword.
Still allows use of drop_variables keyword for variables not listed in first file to
read.
**kwargs : keywords
Keywords to pass through to xarray.open_mfdataset().
Returns
-------
obj : Object (or None)
ACT dataset (or None if no data file(s) found).
Examples
--------
This example will load the example sounding data used for unit testing.
.. code-block :: python
import act
the_ds, the_flag = act.io.armfiles.read_netcdf(act.tests.sample_files.EXAMPLE_SONDE_WILDCARD)
print(the_ds.attrs._datastream)
"""
ds = None
file_dates = []
file_times = []
# Add function keywords to kwargs dictionary for passing into open_mfdataset.
kwargs['combine'] = combine
kwargs['concat_dim'] = concat_dim
kwargs['use_cftime'] = use_cftime
if len(filenames) > 1 and not isinstance(filenames, str):
kwargs['combine_attrs'] = combine_attrs
# Check if keep_variables is set. If so determine correct drop_variables
if keep_variables is not None:
drop_variables = None
if 'drop_variables' in kwargs.keys():
drop_variables = kwargs['drop_variables']
kwargs['drop_variables'] = keep_variables_to_drop_variables(
filenames, keep_variables, drop_variables=drop_variables)
# Create an exception tuple to use with try statements. Doing it this way
# so we can add the FileNotFoundError if requested. Can add more error
# handling in the future.
except_tuple = (ValueError,)
if return_None:
except_tuple = except_tuple + (FileNotFoundError, OSError)
try:
# Read data file with Xarray function
ds = xr.open_mfdataset(filenames, **kwargs)
except except_tuple as exception:
# If requested return None for File not found error
if type(exception).__name__ == 'FileNotFoundError':
return None
# If requested return None for File not found error
if type(exception).__name__ == 'OSError' and exception.args[0] == 'no files to open':
return None
# Look at error message and see if could be nested error message. If so
# update combine keyword and try again. This should allow reading files
# without a time variable but base_time and time_offset variables.
if (
kwargs['combine'] != 'nested'
and type(exception).__name__ == 'ValueError'
and exception.args[0] == 'Could not find any dimension coordinates '
'to use to order the datasets for concatenation'
):
kwargs['combine'] = 'nested'
ds = xr.open_mfdataset(filenames, **kwargs)
else:
# When all else fails raise the original exception
raise exception
# Xarray has issues reading a CF formatted time units string if it contains
# timezone offset without a [+|-] preceding the timezone offset.
# https://github.com/pydata/xarray/issues/3644
# To ensure the times are read in correctly need to set use_cftime=True.
# This will read in time as cftime object. But Xarray uses numpy datetime64
# natively. This will convert the cftime time values to numpy datetime64. cftime
# does not preserve the time past ms precision. We will use ms precision for
# the conversion.
desired_time_precision = 'datetime64[ms]'
for var_name in ['time', 'time_offset']:
try:
if (
cftime_to_datetime64
and 'time' in ds.dims
and type(ds[var_name].values[0]).__module__.startswith('cftime.')
):
# If we just convert time to datetime64 the group, sel, and other Xarray
# methods will not work correctly because time is not indexed. Need to
# use the formation of a Dataset to correctly set the time indexing.
temp_ds = xr.Dataset(
{
var_name: (
ds[var_name].dims,
ds[var_name].values.astype(desired_time_precision),
ds[var_name].attrs,
)
}
)
ds[var_name] = temp_ds[var_name]
temp_ds.close()
# If time_offset is in file try to convert base_time as well
if var_name == 'time_offset':
ds['base_time'].values = ds['base_time'].values.astype(desired_time_precision)
except KeyError:
pass
# Check if "time" variable is not in the netCDF file. If so try to use
# base_time and time_offset to make time variable. Basically a fix for incorrectly
# formatted files. May require using decode_times=False to initially read the data.
if (
cftime_to_datetime64
and 'time' in ds.dims
and 'time' not in ds.coords
and 'time_offset' in ds.data_vars
):
try:
ds = ds.rename({'time_offset': 'time'})
ds = ds.set_coords('time')
del ds['time'].attrs['units']
except (KeyError, ValueError):
pass
# If "time" is not a datetime64 use base_time to calcualte corect values to datetime64
# by adding base_time to time_offset. time_offset was renamed to time above.
if (
cftime_to_datetime64
and 'time' in ds.dims
and 'base_time' in ds.data_vars
and not np.issubdtype(ds['time'].values.dtype, np.datetime64)
and not type(ds['time'].values[0]).__module__.startswith('cftime.')
):
# Use microsecond precision to create time since epoch. Then convert to datetime64
if ds['base_time'].values == ds['time_offset'].values[0]:
time = ds['time_offset'].values
else:
time = (ds['base_time'].values + ds['time_offset'].values * 1000000.0).astype(
'datetime64[us]'
)
# Need to use a new Dataset creation to correctly index time for use with
# .group and .resample methods in Xarray Datasets.
temp_ds = xr.Dataset({'time': (ds['time'].dims, time, ds['time'].attrs)})
ds['time'] = temp_ds['time']
temp_ds.close()
for att_name in ['units', 'ancillary_variables']:
try:
del ds['time'].attrs[att_name]
except KeyError:
pass
# Adding support for wildcards
if isinstance(filenames, str):
filenames = glob.glob(filenames)
elif isinstance(filenames, PosixPath):
filenames = [filenames]
# Get file dates and times that were read in to the object
filenames.sort()
for f in filenames:
f = Path(f).name
pts = re.match(r'(^[a-zA-Z0-9]+)\.([0-9a-z]{2})\.([\d]{8})\.([\d]{6})\.([a-z]{2,3}$)', f)
# If Not ARM format, read in first time for info
if pts is not None:
pts = pts.groups()
file_dates.append(pts[2])
file_times.append(pts[3])
else:
if ds['time'].size > 1:
dummy = ds['time'].values[0]
else:
dummy = ds['time'].values
file_dates.append(utils.numpy_to_arm_date(dummy))
file_times.append(utils.numpy_to_arm_date(dummy, returnTime=True))
# Add attributes
ds.attrs['_file_dates'] = file_dates
ds.attrs['_file_times'] = file_times
is_arm_file_flag = check_arm_standards(ds)
# Ensure that we have _datastream set whether or not there's
# a datastream attribute already.
if is_arm_file_flag == 0:
ds.attrs['_datastream'] = DEFAULT_DATASTREAM_NAME
else:
ds.attrs['_datastream'] = ds.attrs['datastream']
ds.attrs['_arm_standards_flag'] = is_arm_file_flag
if cleanup_qc:
ds.clean.cleanup()
return ds
def keep_variables_to_drop_variables(
filenames,
keep_variables,
drop_variables=None):
"""
Returns a list of variable names to exclude from reading by passing into
`Xarray.open_dataset` drop_variables keyword. This can greatly help reduce
loading time and disk space use of the Dataset.
When passed a netCDF file name, will open the file using the netCDF4 library to get
a list of variable names. There is less overhead reading the variable names with
the netCDF4 library than with Xarray. If more than one filename is provided or a string is
used for shell syntax globbing, will use the first file in the list.
Parameters
----------
filenames : str, pathlib.PosixPath or list of str
Name of file(s) to read.
keep_variables : str or list of str
Variable names desired to keep. Do not need to list associated dimension
names. These will be automatically kept as well.
drop_variables : str or list of str
Variable names to explicitly add to returned list. May be helpful if a variable
exists in a file that is not in the first file in the list.
Returns
-------
obj : list of str
Variable names to exclude from returned Dataset by using drop_variables keyword
when calling Xarray.open_dataset().
Examples
--------
.. code-block :: python
import act
filename = '/data/datastream/hou/houkasacrcfrM1.a1/houkasacrcfrM1.a1.20220404.*.nc'
drop_vars = act.io.armfiles.keep_variables_to_drop_variables(
filename, ['lat','lon','alt','crosspolar_differential_phase'],
drop_variables='variable_name_that_only_exists_in_last_file_of_the_day')
"""
read_variables = []
return_variables = []
if isinstance(keep_variables, str):
keep_variables = [keep_variables]
if isinstance(drop_variables, str):
drop_variables = [drop_variables]
# If filenames is a list subset to first file name.
if isinstance(filenames, (list, tuple)):
filename = filenames[0]
# If filenames is a string, check if it needs to be expanded in shell
# first. Then use first returned file name. Else use the string filename.
elif isinstance(filenames, str):
filename = glob.glob(filenames)
if len(filename) == 0:
return return_variables
else:
filename.sort()
filename = filename[0]
# Use netCDF4 library to extract the variable and dimension names.
rootgrp = Dataset(filename, 'r')
read_variables = list(rootgrp.variables)
dimensions = list(rootgrp.dimensions)
# Loop over the keep_variables to gather the coordinate dimension names that must also be kept.
dims_to_keep = []
for var_name in keep_variables:
try:
dims_to_keep.extend(list(rootgrp[var_name].dimensions))
except IndexError:
pass
rootgrp.close()
# Remove names not matching keep_variables, excluding the associated coordinate dimensions
return_variables = set(read_variables) - set(keep_variables) - set(dims_to_keep)
# Add drop_variables to list
if drop_variables is not None:
return_variables = set(return_variables) | set(drop_variables)
return list(return_variables)
def check_arm_standards(ds):
"""
Checks to see if an xarray dataset conforms to ARM standards.
Parameters
----------
ds : xarray dataset
The dataset to check.
Returns
-------
flag : int
The flag corresponding to whether or not the file conforms
to ARM standards. Bit packed, so 0 for no, 1 for yes
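Examples
--------
A small sketch checking a freshly read dataset; the file path is a
placeholder:
.. code-block:: python
import act
ds = act.io.armfiles.read_netcdf(my_file)
flag = act.io.armfiles.check_arm_standards(ds)
print(flag)  # 1 if a 'datastream' attribute was found, 0 otherwise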
"""
the_flag = 1 << 0
if 'datastream' not in ds.attrs.keys():
the_flag = 0
# Check if the historical global attribute name is
# used instead of updated name of 'datastream'. If so
# correct the global attributes and flip flag.
if 'zeb_platform' in ds.attrs.keys():
ds.attrs['datastream'] = copy.copy(ds.attrs['zeb_platform'])
del ds.attrs['zeb_platform']
the_flag = 1 << 0
return the_flag
def create_obj_from_arm_dod(proc, set_dims, version='', fill_value=-9999.0, scalar_fill_dim=None):
"""
Queries the ARM DOD api and builds an object based on the ARM DOD and
the dimension sizes that are passed in.
Parameters
----------
proc : string
Process to create the object off of. This is normally in the
format of inst.level, e.g. vdis.b1 or kazrge.a1.
set_dims : dict
Dictionary of dims from the DOD and the corresponding sizes.
Time is required. Code will try and pull from DOD, unless set
through this variable
Note: names need to match exactly what is in the dod
i.e. {'drop_diameter': 50, 'time': 1440}
version : string
Version number of the ingest to use. If not set, defaults to
latest version
fill_value : float
Fill value for non-dimension variables. Dimensions cannot have
duplicate values and are incrementally set (0, 1, 2)
scalar_fill_dim : str
Depending on how the object is set up, sometimes the scalar values
are dimensioned to the main dimension. i.e. a lat/lon is set to have
a dimension of time. This is a way to set it up similarly.
Returns
-------
obj : xarray Dataset
ACT object populated with all variables and attributes.
Examples
--------
.. code-block :: python
dims = {'time': 1440, 'drop_diameter': 50}
obj = act.io.armfiles.create_obj_from_arm_dod(
'vdis.b1', dims, version='1.2', scalar_fill_dim='time')
"""
# Set base url to get DOD information
base_url = 'https://pcm.arm.gov/pcm/api/dods/'
# Get data from DOD api
with urllib.request.urlopen(base_url + proc) as url:
data = json.loads(url.read().decode())
# Check version numbers and alert if requested version is not available
keys = list(data['versions'].keys())
if version not in keys:
warnings.warn(
' '.join(
['Version:', version, 'not available or not specified. Using Version:', keys[-1]]
),
UserWarning,
)
version = keys[-1]
# Create empty xarray dataset
obj = xr.Dataset()
# Get the global attributes and add to dataset
atts = {}
for a in data['versions'][version]['atts']:
if a['name'] == 'string':
continue
if a['value'] is None:
a['value'] = ''
atts[a['name']] = a['value']
obj.attrs = atts
# Get variable information and create dataarrays that are
# then added to the dataset
# If not passed in through set_dims, will look to the DOD
# if not set in the DOD, then will raise error
variables = data['versions'][version]['vars']
dod_dims = data['versions'][version]['dims']
for d in dod_dims:
if d['name'] not in list(set_dims.keys()):
if d['length'] > 0:
set_dims[d['name']] = d['length']
else:
raise ValueError(
'Dimension length not set in DOD for '
+ d['name']
+ ', nor passed in through set_dim'
)
for v in variables:
dims = v['dims']
dim_shape = []
# Using provided dimension data, fill array accordingly for easy overwrite
if len(dims) == 0:
if scalar_fill_dim is None:
data_na = fill_value
else:
data_na = np.full(set_dims[scalar_fill_dim], fill_value)
v['dims'] = scalar_fill_dim
else:
for d in dims:
dim_shape.append(set_dims[d])
if len(dim_shape) == 1 and v['name'] == dims[0]:
data_na = np.arange(dim_shape[0])
else:
data_na = np.full(dim_shape, fill_value)
# Get attribute information. Some adjustments are needed so the attributes write correctly to netCDF.
atts = {}
str_flag = False
for a in v['atts']:
if a['name'] == 'string':
str_flag = True
continue
if a['value'] is None:
continue
if str_flag and a['name'] == 'units':
continue
atts[a['name']] = a['value']
da = xr.DataArray(data=data_na, dims=v['dims'], name=v['name'], attrs=atts)
obj[v['name']] = da
return obj
@xr.register_dataset_accessor('write')
class WriteDataset:
"""
Class for cleaning up Dataset before writing to file.
"""
def __init__(self, xarray_obj):
self._obj = xarray_obj
def write_netcdf(
self,
cleanup_global_atts=True,
cleanup_qc_atts=True,
join_char='__',
make_copy=True,
cf_compliant=False,
delete_global_attrs=['qc_standards_version', 'qc_method', 'qc_comment'],
FillValue=-9999,
cf_convention='CF-1.8',
**kwargs,
):
"""
This is a wrapper around Dataset.to_netcdf to clean up the Dataset before
writing to disk. Some things are added to global attributes during ACT reading
process, and QC variables attributes are modified during QC cleanup process.
This will modify before writing to disk to better
match Climate & Forecast standards.
Parameters
----------
cleanup_global_atts : boolean
Option to cleanup global attributes by removing any global attribute
that starts with an underscore.
cleanup_qc_atts : boolean
Option to convert attributes that would be written as a string array
to a single character string. CF 1.7 does not allow string array attributes.
Will use a single space as the delimiter between values and join_char to replace
white space between words.
join_char : str
The character string to use for replacing white spaces between words when converting
a list of strings to single character string attributes.
make_copy : boolean
Make a copy before modifying Dataset to write. For large Datasets this
may add processing time and memory. If modifying the Dataset is OK
try setting to False.
cf_compliant : boolean
Option to output the file with additional attributes to make it Climate & Forecast
compliant. May require running the .clean.cleanup() method on the object to fix other
issues first. This does the best it can, but the result may not be truly compliant. You
should read the CF documents and try to make the file compliant before writing it.
delete_global_attrs : list
Optional global attributes to be deleted. Defaults to some standard
QC attributes that are not needed. Can add more or set to None to not
remove the attributes.
FillValue : int, float
The value to use as a _FillValue in output file. This is used to fix
issues with how Xarray handles missing_value upon reading. It's confusing
so not a perfect fix. Set to None to leave Xarray to do what it wants.
Set to a value to be the value used as _FillValue in the file and data
array. This should then remove missing_value attribute from the file as well.
cf_convention : str
The Climate and Forecast convention string to add to Conventions attribute.
**kwargs : keywords
Keywords to pass through to Dataset.to_netcdf()
Examples
--------
.. code-block :: python
ds_object.write.write_netcdf(path='output.nc')
"""
if make_copy:
write_obj = copy.deepcopy(self._obj)
else:
write_obj = self._obj
encoding = {}
if cleanup_global_atts:
for attr in list(write_obj.attrs):
if attr.startswith('_'):
del write_obj.attrs[attr]
if cleanup_qc_atts:
check_atts = ['flag_meanings', 'flag_assessments']
for var_name in list(write_obj.data_vars):
if 'standard_name' not in write_obj[var_name].attrs.keys():
continue
for attr_name in check_atts:
try:
att_values = write_obj[var_name].attrs[attr_name]
if isinstance(att_values, (list, tuple)):
att_values = [att_value.replace(' ', join_char) for att_value in att_values]
write_obj[var_name].attrs[attr_name] = ' '.join(att_values)
except KeyError:
pass
# Tell .to_netcdf() to not add a _FillValue attribute for
# quality control variables.
if FillValue is not None:
encoding[var_name] = {'_FillValue': None}
# Clean up _FillValue vs missing_value mess by creating an
# encoding dictionary with each variable's _FillValue set to
# requested fill value. May need to improve upon this for data type
# and other issues in the future.
if FillValue is not None:
skip_variables = ['base_time', 'time_offset', 'qc_time'] + list(encoding.keys())
for var_name in list(write_obj.data_vars):
if var_name not in skip_variables:
encoding[var_name] = {'_FillValue': FillValue}
if delete_global_attrs is not None:
for attr in delete_global_attrs:
try:
del write_obj.attrs[attr]
except KeyError:
pass
# If requested update global attributes and variables attributes for required
# CF attributes.
if cf_compliant:
# Get variable names and standard name for each variable
var_names = list(write_obj.keys())
standard_names = []
for var_name in var_names:
try:
standard_names.append(write_obj[var_name].attrs['standard_name'])
except KeyError:
standard_names.append(None)
# Check if time variable has axis and standard_name attribute
coord_name = 'time'
try:
write_obj[coord_name].attrs['axis']
except KeyError:
try:
write_obj[coord_name].attrs['axis'] = 'T'
except KeyError:
pass
try:
write_obj[coord_name].attrs['standard_name']
except KeyError:
try:
write_obj[coord_name].attrs['standard_name'] = 'time'
except KeyError:
pass
# Try to determine type of dataset by coordinate dimension named time
# and other factors
try:
write_obj.attrs['FeatureType']
except KeyError:
dim_names = list(write_obj.dims)
FeatureType = None
if dim_names == ['time']:
FeatureType = 'timeSeries'
elif len(dim_names) == 2 and 'time' in dim_names and 'bound' in dim_names:
FeatureType = 'timeSeries'
elif len(dim_names) >= 2 and 'time' in dim_names:
for var_name in var_names:
dims = list(write_obj[var_name].dims)
if len(dims) == 2 and 'time' in dims:
prof_dim = list(set(dims) - {'time'})[0]
if write_obj[prof_dim].values.size > 2:
FeatureType = 'timeSeriesProfile'
break
if FeatureType is not None:
write_obj.attrs['FeatureType'] = FeatureType
# Add axis and positive attributes to variables with standard_name
# equal to 'altitude'
alt_variables = [
var_names[ii] for ii, sn in enumerate(standard_names) if sn == 'altitude'
]
for var_name in alt_variables:
try:
write_obj[var_name].attrs['axis']
except KeyError:
write_obj[var_name].attrs['axis'] = 'Z'
try:
write_obj[var_name].attrs['positive']
except KeyError:
write_obj[var_name].attrs['positive'] = 'up'
# Check if the Conventions global attribute lists the CF convention
try:
Conventions = write_obj.attrs['Conventions']
Conventions = Conventions.split()
cf_listed = False
for ii in Conventions:
if ii.startswith('CF-'):
cf_listed = True
break
if not cf_listed:
Conventions.append(cf_convention)
write_obj.attrs['Conventions'] = ' '.join(Conventions)
except KeyError:
write_obj.attrs['Conventions'] = str(cf_convention)
# Reorder global attributes to ensure history is last
try:
global_attrs = write_obj.attrs
history = copy.copy(global_attrs['history'])
del global_attrs['history']
global_attrs['history'] = history
except KeyError:
pass
write_obj.to_netcdf(encoding=encoding, **kwargs)
def read_mmcr(filenames):
"""
Reads in ARM MMCR files and splits up the variables into specific
mode variables based on what's in the files. MMCR files have the modes
interleaved and are not readable using xarray so some modifications are
needed ahead of time.
Parameters
----------
filenames : str, pathlib.PosixPath or list of str
Name of file(s) to read.
Returns
-------
obj : Object (or None)
ACT dataset (or None if no data file(s) found).
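Examples
--------
A minimal sketch; the glob pattern is a placeholder for local MMCR files:
.. code-block:: python
import glob
import act
files = glob.glob('/data/sgpmmcrmomC1.b1/*.cdf')
obj = act.io.armfiles.read_mmcr(files)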
"""
# Sort the files to make sure they concatenate right
filenames.sort()
# Run through each file and read it in using netCDF4, then
# read it in with xarray
objs = []
for f in filenames:
nc = Dataset(f, "a")
# Change heights name to range to read appropriately to xarray
if 'heights' in nc.dimensions:
nc.renameDimension('heights', 'range')
if nc is not None:
obj = xr.open_dataset(xr.backends.NetCDF4DataStore(nc))
objs.append(obj)
# Concatenate objects together
if len(objs) > 1:
obj = xr.concat(objs, dim='time')
else:
obj = objs
# Get modes and ranges with time/height modes
modes = obj['mode'].values
mode_vars = []
for v in obj:
if 'range' in obj[v].dims and 'time' in obj[v].dims and len(obj[v].dims) == 2:
mode_vars.append(v)
# For each mode, extract the data variables if available and
# save them as individual variables in the object.
for m in modes:
mode_desc = obj['ModeDescription'].values[0, m]
if np.isnan(obj['heights'].values[0, m, :]).all():
continue
mode_desc = str(mode_desc).split('_')[-1][0:-1]
mode_desc = str(mode_desc).split('\'')[0]
idx = np.where(obj['ModeNum'].values == m)[0]
range_data = obj['heights'].values[0, m, :]
idy = np.where(~np.isnan(range_data))[0]
for v in mode_vars:
new_var_name = v + '_' + mode_desc
time_name = 'time_' + mode_desc
range_name = 'range_' + mode_desc
data = obj[v].values[idx, :]
data = data[:, idy]
attrs = obj[v].attrs
da = xr.DataArray(
data=data,
coords={time_name: obj['time'].values[idx], range_name: range_data[idy]},
dims=[time_name, range_name],
attrs=attrs
)
obj[new_var_name] = da
return obj
<|code_end|>
act/plotting/timeseriesdisplay.py
<|code_start|>"""
Stores the class for TimeSeriesDisplay.
"""
import datetime as dt
import textwrap
import warnings
from copy import deepcopy
from re import search, search as re_search
import matplotlib as mpl
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from matplotlib import colors as mplcolors
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scipy.interpolate import NearestNDInterpolator
from ..qc.qcfilter import parse_bit
from ..utils import data_utils, datetime_utils as dt_utils
from ..utils.datetime_utils import determine_time_delta, reduce_time_ranges
from ..utils.geo_utils import get_sunrise_sunset_noon
from . import common
from .plot import Display
class TimeSeriesDisplay(Display):
"""
This subclass contains routines that are specific to plotting
time series plots from data. It is inherited from Display and therefore
contains all of Display's attributes and methods.
Examples
--------
To create a TimeSeriesDisplay with 3 rows, simply do:
.. code-block:: python
ds = act.read_netcdf(the_file)
disp = act.plotting.TimeSeriesDisplay(ds, subplot_shape=(3,), figsize=(15, 5))
The TimeSeriesDisplay constructor takes in the same keyword arguments as
plt.subplots. For more information on the plt.subplots keyword arguments,
see the `matplotlib documentation
<https://matplotlib.org/api/_as_gen/matplotlib.pyplot.subplots.html>`_.
If no subplot_shape is provided, then no figure or axis will be created
until add_subplots or plots is called.
"""
def __init__(self, obj, subplot_shape=(1,), ds_name=None, **kwargs):
super().__init__(obj, subplot_shape, ds_name, **kwargs)
def day_night_background(self, dsname=None, subplot_index=(0,)):
"""
Colorcodes the background according to sunrise/sunset.
Parameters
----------
dsname : None or str
If there is more than one datastream in the display object the
name of the datastream needs to be specified. If set to None and
there is only one datastream then ACT will use the sole datastream
in the object.
subplot_index : 1 or 2D tuple, list, or array
The index to the subplot to place the day and night background in.
"""
if dsname is None and len(self._obj.keys()) > 1:
raise ValueError(
'You must choose a datastream to derive the '
+ 'information needed for the day and night '
+ 'background when 2 or more datasets are in '
+ 'the display object.'
)
elif dsname is None:
dsname = list(self._obj.keys())[0]
# Get File Dates
try:
file_dates = self._obj[dsname].attrs['_file_dates']
except KeyError:
file_dates = []
if len(file_dates) == 0:
sdate = dt_utils.numpy_to_arm_date(self._obj[dsname].time.values[0])
edate = dt_utils.numpy_to_arm_date(self._obj[dsname].time.values[-1])
file_dates = [sdate, edate]
all_dates = dt_utils.dates_between(file_dates[0], file_dates[-1])
if self.axes is None:
raise RuntimeError('day_night_background requires the plot to ' 'be displayed.')
ax = self.axes[subplot_index]
# Find variable names for latitude and longitude
variables = list(self._obj[dsname].data_vars)
lat_name = [var for var in ['lat', 'latitude'] if var in variables]
lon_name = [var for var in ['lon', 'longitude'] if var in variables]
if len(lat_name) == 0:
lat_name = None
else:
lat_name = lat_name[0]
if len(lon_name) == 0:
lon_name = None
else:
lon_name = lon_name[0]
# Variable name does not match, look for standard_name declaration
if lat_name is None or lon_name is None:
for var in variables:
try:
if self._obj[dsname][var].attrs['standard_name'] == 'latitude':
lat_name = var
except KeyError:
pass
try:
if self._obj[dsname][var].attrs['standard_name'] == 'longitude':
lon_name = var
except KeyError:
pass
if lat_name is not None and lon_name is not None:
break
if lat_name is None or lon_name is None:
return
try:
if self._obj[dsname][lat_name].data.size > 1:
# Look for non-NaN values to use for location. If not found use first value.
lat = self._obj[dsname][lat_name].values
index = np.where(np.isfinite(lat))[0]
if index.size == 0:
index = [0]
lat = float(lat[index[0]])
# Look for non-NaN values to use for location. If not found use first value.
lon = self._obj[dsname][lon_name].values
index = np.where(np.isfinite(lon))[0]
if index.size == 0:
index = [0]
lon = float(lon[index[0]])
else:
lat = float(self._obj[dsname][lat_name].values)
lon = float(self._obj[dsname][lon_name].values)
except AttributeError:
return
if not np.isfinite(lat):
warnings.warn(f"Latitude value in dataset of '{lat}' is not finite. ", RuntimeWarning)
return
if not np.isfinite(lon):
warnings.warn(f"Longitude value in dataset of '{lon}' is not finite. ", RuntimeWarning)
return
lat_range = [-90, 90]
if not (lat_range[0] <= lat <= lat_range[1]):
warnings.warn(
f"Latitude value in dataset of '{lat}' not within acceptable "
f'range of {lat_range[0]} <= latitude <= {lat_range[1]}. ',
RuntimeWarning,
)
return
lon_range = [-180, 180]
if not (lon_range[0] <= lon <= lon_range[1]):
warnings.warn(
f"Longitude value in dataset of '{lon}' not within acceptable "
f'range of {lon_range[0]} <= longitude <= {lon_range[1]}. ',
RuntimeWarning,
)
return
# initialize the plot to a gray background for total darkness
rect = ax.patch
rect.set_facecolor('0.85')
# Get date ranges to plot
plot_dates = []
for f in all_dates:
for ii in [-1, 0, 1]:
plot_dates.append(f + dt.timedelta(days=ii))
# Get sunrise, sunset and noon times
sunrise, sunset, noon = get_sunrise_sunset_noon(lat, lon, plot_dates)
# Plot daylight
for ii in range(0, len(sunrise)):
ax.axvspan(sunrise[ii], sunset[ii], facecolor='#FFFFCC', zorder=0)
# Plot noon line
for ii in noon:
ax.axvline(x=ii, linestyle='--', color='y', zorder=1)
def set_xrng(self, xrng, subplot_index=(0,)):
"""
Sets the x range of the plot.
Parameters
----------
xrng : 2 number array
The x limits of the plot.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
"""
if self.axes is None:
raise RuntimeError('set_xrng requires the plot to be displayed.')
self.axes[subplot_index].set_xlim(xrng)
# Make sure that the xrng value is a numpy array not pandas
if isinstance(xrng[0], pd.Timestamp):
xrng = [x.to_numpy() for x in xrng if isinstance(x, pd.Timestamp)]
# Make sure that the xrng value is a numpy array not datetime.datetime
if isinstance(xrng[0], dt.datetime):
xrng = [np.datetime64(x) for x in xrng if isinstance(x, dt.datetime)]
if len(subplot_index) < 2:
self.xrng[subplot_index, 0] = xrng[0].astype('datetime64[D]').astype(float)
self.xrng[subplot_index, 1] = xrng[1].astype('datetime64[D]').astype(float)
else:
self.xrng[subplot_index][0] = xrng[0].astype('datetime64[D]').astype(float)
self.xrng[subplot_index][1] = xrng[1].astype('datetime64[D]').astype(float)
def set_yrng(self, yrng, subplot_index=(0,)):
"""
Sets the y range of the plot.
Parameters
----------
yrng : 2 number array
The y limits of the plot.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
"""
if self.axes is None:
raise RuntimeError('set_yrng requires the plot to be displayed.')
if not hasattr(self, 'yrng') and len(self.axes.shape) == 2:
self.yrng = np.zeros((self.axes.shape[0], self.axes.shape[1], 2))
elif not hasattr(self, 'yrng') and len(self.axes.shape) == 1:
self.yrng = np.zeros((self.axes.shape[0], 2))
if yrng[0] == yrng[1]:
yrng[1] = yrng[1] + 1
self.axes[subplot_index].set_ylim(yrng)
try:
self.yrng[subplot_index, :] = yrng
except IndexError:
self.yrng[subplot_index] = yrng
def plot(
self,
field,
dsname=None,
subplot_index=(0,),
cmap=None,
set_title=None,
add_nan=False,
day_night_background=False,
invert_y_axis=False,
abs_limits=(None, None),
time_rng=None,
y_rng=None,
use_var_for_y=None,
set_shading='auto',
assessment_overplot=False,
overplot_marker='.',
overplot_behind=False,
overplot_markersize=6,
assessment_overplot_category={
'Incorrect': ['Bad', 'Incorrect'],
'Suspect': ['Indeterminate', 'Suspect'],
},
assessment_overplot_category_color={'Incorrect': 'red', 'Suspect': 'orange'},
force_line_plot=False,
labels=False,
cbar_label=None,
cbar_h_adjust=None,
secondary_y=False,
y_axis_flag_meanings=False,
colorbar_labels=None,
**kwargs,
):
"""
Makes a timeseries plot. If subplots have not been added yet, an axis
will be created assuming that there is only going to be one plot.
If plotting a high data volume 2D dataset, it may take some time to plot.
In order to speed up your plot creation, please resample your data to a
lower resolution dataset.
Parameters
----------
field : str
The name of the field to plot.
dsname : None or str
If there is more than one datastream in the display object the
name of the datastream needs to be specified. If set to None and
there is only one datastream ACT will use the sole datastream
in the object.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
cmap : matplotlib colormap
The colormap to use.
set_title : str
The title for the plot.
add_nan : bool
Set to True to fill in data gaps with NaNs.
day_night_background : bool
Set to True to fill in a color coded background.
according to the time of day.
abs_limits : tuple or list
Sets the bounds on plot limits even if data values exceed
those limits. Set to (ymin,ymax). Use None if only setting
minimum or maximum limit, i.e. (22., None).
time_rng : tuple or list
List or tuple with (min, max) values to set the x-axis range
limits.
y_rng : tuple or list
List or tuple with (min, max) values to set the y-axis range
use_var_for_y : str
Set this to the name of a data variable in the Dataset to use as
the y-axis variable instead of the default dimension. Useful for
instances where data has an index-based dimension instead of a
height-based dimension. If shapes of arrays do not match it will
automatically revert back to the original ydata.
set_shading : string
Option to set the matplotlib.pcolormesh shading parameter.
Defaults to 'auto'.
assessment_overplot : boolean
Option to overplot quality control colored symbols over plotted
data using flag_assessment categories.
overplot_marker : str
Marker to use for overplot symbol.
overplot_behind : bool
Place the overplot marker behind the data point.
overplot_markersize : float or int
Size of overplot marker. If overplot_behind or force_line_plot
are set the marker size will be double overplot_markersize so
the color is visible.
assessment_overplot_category : dict
Lookup to categorize assessments into groups. This allows using
multiple terms for the same quality control level of failure.
Also allows adding more to the defaults.
assessment_overplot_category_color : dict
Lookup to match overplot category color to assessment grouping.
force_line_plot : boolean
Option to plot 2D data as 1D line plots.
labels : boolean or list
Option to overwrite the legend labels. Must have same dimensions as
number of lines plotted.
cbar_label : str
Option to overwrite default colorbar label.
cbar_h_adjust : float
Option to adjust location of colorbar horizontally. Positive values
move to right negative values move to left.
secondary_y : boolean
Option to plot on secondary y axis.
y_axis_flag_meanings : boolean or int
When set to True and plotting a state variable with flag_values and
flag_meanings attributes, the numerical y-axis values are replaced with
the flag_meanings values. Set to a positive number larger than 1 to
indicate the maximum word length to use. If the text is longer than
that value and contains space characters it will be wrapped over multiple lines.
colorbar_labels : dict
A dictionary containing values for plotting a 2D array of state variables.
The dictionary uses data values as keys and a dictionary containing keys
'text' and 'color' for each data value to plot.
Example:
{0: {'text': 'Clear sky', 'color': 'white'},
1: {'text': 'Liquid', 'color': 'green'},
2: {'text': 'Ice', 'color': 'blue'},
3: {'text': 'Mixed phase', 'color': 'purple'}}
**kwargs : keyword arguments
The keyword arguments for :func:`plt.plot` (1D timeseries) or
:func:`plt.pcolormesh` (2D timeseries).
Returns
-------
ax : matplotlib axis handle
The matplotlib axis handle of the plot.
"""
if dsname is None and len(self._obj.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._obj.keys())[0]
if y_axis_flag_meanings:
kwargs['linestyle'] = ''
# Get data and dimensions
data = self._obj[dsname][field]
dim = list(self._obj[dsname][field].dims)
xdata = self._obj[dsname][dim[0]]
if 'units' in data.attrs:
ytitle = ''.join(['(', data.attrs['units'], ')'])
else:
ytitle = field
if cbar_label is None:
cbar_default = ytitle
if len(dim) > 1:
if use_var_for_y is None:
ydata = self._obj[dsname][dim[1]]
else:
ydata = self._obj[dsname][use_var_for_y]
ydata_dim1 = self._obj[dsname][dim[1]]
if np.shape(ydata) != np.shape(ydata_dim1):
ydata = ydata_dim1
units = ytitle
if 'units' in ydata.attrs.keys():
units = ydata.attrs['units']
ytitle = ''.join(['(', units, ')'])
else:
units = ''
ytitle = dim[1]
# Create labels if 2d as 1d
if force_line_plot is True:
if labels is True:
labels = [' '.join([str(d), units]) for d in ydata.values]
ytitle = f"({data.attrs['units']})"
ydata = None
else:
ydata = None
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
# Set up secondary y axis if requested
if secondary_y is False:
ax = self.axes[subplot_index]
else:
ax = self.axes[subplot_index].twinx()
if colorbar_labels is not None:
flag_values = list(colorbar_labels.keys())
flag_meanings = [value['text'] for key, value in colorbar_labels.items()]
cbar_colors = [value['color'] for key, value in colorbar_labels.items()]
cmap = mpl.colors.ListedColormap(cbar_colors)
for ii, flag_meaning in enumerate(flag_meanings):
if len(flag_meaning) > 20:
flag_meaning = textwrap.fill(flag_meaning, width=20)
flag_meanings[ii] = flag_meaning
else:
flag_values = None
flag_meanings = None
cbar_colors = None
if ydata is None:
# Add in nans to ensure the data does not connect the line.
if add_nan is True:
xdata, data = data_utils.add_in_nan(xdata, data)
if day_night_background is True:
self.day_night_background(subplot_index=subplot_index, dsname=dsname)
# If limiting data being plotted use masked arrays
# Need to do it this way because of autoscale() method
if abs_limits[0] is not None and abs_limits[1] is not None:
data = np.ma.masked_outside(data, abs_limits[0], abs_limits[1])
elif abs_limits[0] is not None and abs_limits[1] is None:
data = np.ma.masked_less_equal(data, abs_limits[0])
elif abs_limits[0] is None and abs_limits[1] is not None:
data = np.ma.masked_greater_equal(data, abs_limits[1])
# Plot the data
if 'marker' not in kwargs.keys():
kwargs['marker'] = '.'
lines = ax.plot(xdata, data, **kwargs)
# Check if we need to call legend method after plotting. This is only
# called when no assessment overplot is called.
add_legend = False
if 'label' in kwargs.keys():
add_legend = True
# Overplot failing data if requested
if assessment_overplot:
# If we are doing forced line plot from 2D data need to manage
# legend labels. Will make arrays to hold labels of QC failing
# because not set when labels not set.
if not isinstance(labels, list) and add_legend is False:
labels = []
lines = []
# For forced line plot need to plot QC behind point instead of
# on top of point.
zorder = None
if force_line_plot or overplot_behind:
zorder = 0
overplot_markersize *= 2.0
for assessment, categories in assessment_overplot_category.items():
flag_data = self._obj[dsname].qcfilter.get_masked_data(
field, rm_assessments=categories, return_inverse=True
)
if np.invert(flag_data.mask).any() and np.isfinite(flag_data).any():
try:
flag_data.mask = np.logical_or(data.mask, flag_data.mask)
except AttributeError:
pass
qc_ax = ax.plot(
xdata,
flag_data,
marker=overplot_marker,
linestyle='',
markersize=overplot_markersize,
color=assessment_overplot_category_color[assessment],
label=assessment,
zorder=zorder,
)
# If labels keyword is set need to add labels for calling legend
if isinstance(labels, list):
# If plotting forced_line_plot need to subset the Line2D object
# so we don't have more than one added to legend.
if len(qc_ax) > 1:
lines.extend(qc_ax[:1])
else:
lines.extend(qc_ax)
labels.append(assessment)
add_legend = True
# Add legend if labels are available
if isinstance(labels, list):
ax.legend(lines, labels)
elif add_legend:
ax.legend()
# Change y axis to text from flag_meanings if requested.
if y_axis_flag_meanings:
flag_meanings = self._obj[dsname][field].attrs['flag_meanings']
flag_values = self._obj[dsname][field].attrs['flag_values']
# If keyword is larger than 1 assume this is the maximum character length
# desired and insert returns to wrap text.
if y_axis_flag_meanings > 1:
for ii, flag_meaning in enumerate(flag_meanings):
if len(flag_meaning) > y_axis_flag_meanings:
flag_meaning = textwrap.fill(flag_meaning, width=y_axis_flag_meanings)
flag_meanings[ii] = flag_meaning
ax.set_yticks(flag_values)
ax.set_yticklabels(flag_meanings)
else:
# Add in nans to ensure the data are not streaking
if add_nan is True:
xdata, data = data_utils.add_in_nan(xdata, data)
# Sets shading parameter to auto. Matplotlib will check dimensions.
# If X, Y and C are the same dimensions shading is set to nearest.
# If X and Y dimensions are 1 greater than C shading is set to flat.
if 'edgecolors' not in kwargs.keys():
kwargs['edgecolors'] = 'face'
mesh = ax.pcolormesh(
np.asarray(xdata),
ydata,
data.transpose(),
shading=set_shading,
cmap=cmap,
**kwargs,
)
# Set Title
if set_title is None:
if isinstance(self._obj[dsname].time.values[0], np.datetime64):
set_title = ' '.join(
[
dsname,
field,
'on',
dt_utils.numpy_to_arm_date(self._obj[dsname].time.values[0]),
]
)
else:
date_result = search(
r'\d{4}-\d{1,2}-\d{1,2}', self._obj[dsname].time.attrs['units']
)
if date_result is not None:
set_title = ' '.join([dsname, field, 'on', date_result.group(0)])
else:
set_title = ' '.join([dsname, field])
if secondary_y is False:
ax.set_title(set_title)
# Set YTitle
if not y_axis_flag_meanings:
ax.set_ylabel(ytitle)
# Set X Limit - We want the same time axes for all subplots
if not hasattr(self, 'time_rng'):
if time_rng is not None:
self.time_rng = list(time_rng)
else:
self.time_rng = [xdata.min().values, xdata.max().values]
self.set_xrng(self.time_rng, subplot_index)
# Set Y Limit
if y_rng is not None:
self.set_yrng(y_rng)
if hasattr(self, 'yrng'):
# Make sure that the yrng is not just the default
if ydata is None:
if abs_limits[0] is not None or abs_limits[1] is not None:
our_data = data
else:
our_data = data.values
else:
our_data = ydata
finite = np.isfinite(our_data)
if finite.any():
our_data = our_data[finite]
if invert_y_axis is False:
yrng = [np.min(our_data), np.max(our_data)]
else:
yrng = [np.max(our_data), np.min(our_data)]
else:
yrng = [0, 1]
# Check if current range is outside of new range and only set
# values that work for all data plotted.
if isinstance(yrng[0], np.datetime64):
yrng = mdates.datestr2num([str(yrng[0]), str(yrng[1])])
current_yrng = ax.get_ylim()
if invert_y_axis is False:
if yrng[0] > current_yrng[0]:
yrng[0] = current_yrng[0]
if yrng[1] < current_yrng[1]:
yrng[1] = current_yrng[1]
else:
if yrng[0] < current_yrng[0]:
yrng[0] = current_yrng[0]
if yrng[1] > current_yrng[1]:
yrng[1] = current_yrng[1]
# Set y range the normal way if not secondary y
# If secondary, just use set_ylim
if secondary_y is False:
self.set_yrng(yrng, subplot_index)
else:
ax.set_ylim(yrng)
# Set X Format
if len(subplot_index) == 1:
days = self.xrng[subplot_index, 1] - self.xrng[subplot_index, 0]
else:
days = self.xrng[subplot_index][1] - self.xrng[subplot_index][0]
myFmt = common.get_date_format(days)
ax.xaxis.set_major_formatter(myFmt)
# Set X format - We want the same time axes for all subplots
if not hasattr(self, 'time_fmt'):
self.time_fmt = myFmt
# Put on an xlabel, but only if we are making the bottom-most plot
if subplot_index[0] == self.axes.shape[0] - 1:
ax.set_xlabel('Time [UTC]')
if ydata is not None:
if cbar_label is None:
cbar_title = cbar_default
else:
cbar_title = ''.join(['(', cbar_label, ')'])
if colorbar_labels is not None:
cbar_title = None
cbar = self.add_colorbar(
mesh,
title=cbar_title,
subplot_index=subplot_index,
values=flag_values,
pad=cbar_h_adjust,
)
cbar.set_ticks(flag_values)
cbar.set_ticklabels(flag_meanings)
cbar.ax.tick_params(labelsize=10)
else:
self.add_colorbar(
mesh, title=cbar_title, subplot_index=subplot_index, pad=cbar_h_adjust
)
return ax
def plot_barbs_from_spd_dir(
self, speed_field, direction_field, pres_field=None, dsname=None, **kwargs
):
"""
This procedure will make a wind barb plot timeseries.
If a pressure field is given and the wind fields are 1D, which, for
example, would occur if one wants to plot a timeseries of
rawinsonde data, then a time-height cross section of
winds will be made.
Note: This procedure calls plot_barbs_from_u_v and will take in the
same keyword arguments as that procedure.
Parameters
----------
speed_field : str
The name of the field specifying the wind speed in m/s.
direction_field : str
The name of the field specifying the wind direction in degrees.
0 degrees is defined to be north and increases clockwise like
what is used in standard meteorological notation.
pres_field : str
The name of the field specifying pressure or height. If using
height coordinates, then we recommend setting invert_y_axis
to False.
dsname : str
The name of the datastream to plot. Setting to None will make
ACT attempt to autodetect this.
kwargs : dict
Any additional keyword arguments will be passed into
:func:`act.plotting.TimeSeriesDisplay.plot_barbs_from_u_v`.
Returns
-------
the_ax : matplotlib axis handle
The handle to the axis where the plot was made on.
Examples
--------
.. code-block:: python
sonde_ds = act.io.armfiles.read_netcdf(
act.tests.sample_files.EXAMPLE_TWP_SONDE_WILDCARD)
BarbDisplay = act.plotting.TimeSeriesDisplay(
{'sonde_darwin': sonde_ds}, figsize=(10,5))
BarbDisplay.plot_barbs_from_spd_dir('wspd', 'deg', 'pres',
num_barbs_x=20)
"""
if dsname is None and len(self._obj.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._obj.keys())[0]
# Make temporary field called tempu, tempv
spd = self._obj[dsname][speed_field]
dir = self._obj[dsname][direction_field]
tempu = -np.sin(np.deg2rad(dir)) * spd
tempv = -np.cos(np.deg2rad(dir)) * spd
self._obj[dsname]['temp_u'] = deepcopy(self._obj[dsname][speed_field])
self._obj[dsname]['temp_v'] = deepcopy(self._obj[dsname][speed_field])
self._obj[dsname]['temp_u'].values = tempu
self._obj[dsname]['temp_v'].values = tempv
the_ax = self.plot_barbs_from_u_v('temp_u', 'temp_v', pres_field, dsname, **kwargs)
del self._obj[dsname]['temp_u'], self._obj[dsname]['temp_v']
return the_ax
def plot_barbs_from_u_v(
self,
u_field,
v_field,
pres_field=None,
dsname=None,
subplot_index=(0,),
set_title=None,
day_night_background=False,
invert_y_axis=True,
num_barbs_x=20,
num_barbs_y=20,
use_var_for_y=None,
**kwargs,
):
"""
This function will plot a wind barb timeseries from u and v wind
data. If pres_field is given, a time-height series will be plotted
from 1-D wind data.
Parameters
----------
u_field : str
The name of the field containing the U component of the wind.
v_field : str
The name of the field containing the V component of the wind.
pres_field : str or None
The name of the field containing the pressure or height. Set
to None to not use this.
dsname : str or None
The name of the datastream to plot. Setting to None will make
ACT automatically try to determine this.
subplot_index : 2-tuple
The index of the subplot to make the plot on.
set_title : str or None
The title of the plot.
day_night_background : bool
Set to True to plot a day/night background.
invert_y_axis : bool
Set to True to invert the y axis (i.e. for plotting pressure as
the height coordinate).
num_barbs_x : int
The number of wind barbs to plot in the x axis.
num_barbs_y : int
The number of wind barbs to plot in the y axis.
cmap : matplotlib.colors.LinearSegmentedColormap
A color map to use with wind barbs. If this is set the plt.barbs
routine will be passed the C parameter scaled as sqrt of sum of the
squares and used with the passed in color map. A colorbar will also
be added. Setting the limits of the colorbar can be done with 'clim'.
Setting this changes the wind barbs from black to colors.
use_var_for_y : str
Set this to the name of a data variable in the Dataset to use as the
y-axis variable instead of the default dimension. Useful for instances
where data has an index-based dimension instead of a height-based
dimension. If shapes of arrays do not match it will automatically
revert back to the original ydata.
**kwargs : keyword arguments
Additional keyword arguments will be passed into plt.barbs.
Returns
-------
ax : matplotlib axis handle
The axis handle that contains the reference to the
constructed plot.
"""
if dsname is None and len(self._obj.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._obj.keys())[0]
# Get data and dimensions
u = self._obj[dsname][u_field].values
v = self._obj[dsname][v_field].values
dim = list(self._obj[dsname][u_field].dims)
xdata = self._obj[dsname][dim[0]].values
num_x = xdata.shape[-1]
barb_step_x = round(num_x / num_barbs_x)
if barb_step_x == 0:
barb_step_x = 1
if len(dim) > 1 and pres_field is None:
if use_var_for_y is None:
ydata = self._obj[dsname][dim[1]]
else:
ydata = self._obj[dsname][use_var_for_y]
ydata_dim1 = self._obj[dsname][dim[1]]
if np.shape(ydata) != np.shape(ydata_dim1):
ydata = ydata_dim1
if 'units' in ydata.attrs:
units = ydata.attrs['units']
else:
units = ''
ytitle = ''.join(['(', units, ')'])
num_y = ydata.shape[0]
barb_step_y = round(num_y / num_barbs_y)
if barb_step_y == 0:
barb_step_y = 1
xdata, ydata = np.meshgrid(xdata, ydata, indexing='ij')
elif pres_field is not None:
# What we will do here is do a nearest-neighbor interpolation
# for each member of the series. Coordinates are time, pressure
pres = self._obj[dsname][pres_field]
u_interp = NearestNDInterpolator((xdata, pres.values), u, rescale=True)
v_interp = NearestNDInterpolator((xdata, pres.values), v, rescale=True)
barb_step_x = 1
barb_step_y = 1
x_times = pd.date_range(xdata.min(), xdata.max(), periods=num_barbs_x)
if num_barbs_y == 1:
y_levels = pres.mean()
else:
y_levels = np.linspace(np.nanmin(pres), np.nanmax(pres), num_barbs_y)
xdata, ydata = np.meshgrid(x_times, y_levels, indexing='ij')
u = u_interp(xdata, ydata)
v = v_interp(xdata, ydata)
if 'units' in pres.attrs:
units = pres.attrs['units']
else:
units = ''
ytitle = ''.join(['(', units, ')'])
else:
ydata = None
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
if ydata is None:
ydata = np.ones(xdata.shape)
if 'cmap' in kwargs.keys():
map_color = np.sqrt(np.power(u[::barb_step_x], 2) + np.power(v[::barb_step_x], 2))
map_color[np.isnan(map_color)] = 0
ax = self.axes[subplot_index].barbs(
xdata[::barb_step_x],
ydata[::barb_step_x],
u[::barb_step_x],
v[::barb_step_x],
map_color,
**kwargs,
)
plt.colorbar(
ax,
ax=[self.axes[subplot_index]],
label='Wind Speed (' + self._obj[dsname][u_field].attrs['units'] + ')',
)
else:
self.axes[subplot_index].barbs(
xdata[::barb_step_x],
ydata[::barb_step_x],
u[::barb_step_x],
v[::barb_step_x],
**kwargs,
)
self.axes[subplot_index].set_yticks([])
else:
if 'cmap' in kwargs.keys():
map_color = np.sqrt(
np.power(u[::barb_step_x, ::barb_step_y], 2)
+ np.power(v[::barb_step_x, ::barb_step_y], 2)
)
map_color[np.isnan(map_color)] = 0
ax = self.axes[subplot_index].barbs(
xdata[::barb_step_x, ::barb_step_y],
ydata[::barb_step_x, ::barb_step_y],
u[::barb_step_x, ::barb_step_y],
v[::barb_step_x, ::barb_step_y],
map_color,
**kwargs,
)
plt.colorbar(
ax,
ax=[self.axes[subplot_index]],
label='Wind Speed (' + self._obj[dsname][u_field].attrs['units'] + ')',
)
else:
ax = self.axes[subplot_index].barbs(
xdata[::barb_step_x, ::barb_step_y],
ydata[::barb_step_x, ::barb_step_y],
u[::barb_step_x, ::barb_step_y],
v[::barb_step_x, ::barb_step_y],
**kwargs,
)
if day_night_background is True:
self.day_night_background(subplot_index=subplot_index, dsname=dsname)
# Set Title
if set_title is None:
set_title = ' '.join(
[
dsname,
'on',
dt_utils.numpy_to_arm_date(self._obj[dsname].time.values[0]),
]
)
self.axes[subplot_index].set_title(set_title)
# Set YTitle
if 'ytitle' in locals():
self.axes[subplot_index].set_ylabel(ytitle)
# Set X Limit - We want the same time axes for all subplots
time_rng = [xdata.min(), xdata.max()]
self.set_xrng(time_rng, subplot_index)
# Set Y Limit
if hasattr(self, 'yrng'):
# Make sure that the yrng is not just the default
if not np.all(self.yrng[subplot_index] == 0):
self.set_yrng(self.yrng[subplot_index], subplot_index)
else:
if ydata is None:
our_data = xdata
else:
our_data = ydata
if np.isfinite(our_data).any():
if invert_y_axis is False:
yrng = [np.nanmin(our_data), np.nanmax(our_data)]
else:
yrng = [np.nanmax(our_data), np.nanmin(our_data)]
else:
yrng = [0, 1]
self.set_yrng(yrng, subplot_index)
# Set X Format
if len(subplot_index) == 1:
days = self.xrng[subplot_index, 1] - self.xrng[subplot_index, 0]
else:
days = (
self.xrng[subplot_index[0], subplot_index[1], 1]
- self.xrng[subplot_index[0], subplot_index[1], 0]
)
# Put on an xlabel, but only if we are making the bottom-most plot
if subplot_index[0] == self.axes.shape[0] - 1:
self.axes[subplot_index].set_xlabel('Time [UTC]')
myFmt = common.get_date_format(days)
self.axes[subplot_index].xaxis.set_major_formatter(myFmt)
return self.axes[subplot_index]
def plot_time_height_xsection_from_1d_data(
self,
data_field,
pres_field,
dsname=None,
subplot_index=(0,),
set_title=None,
day_night_background=False,
num_time_periods=20,
num_y_levels=20,
invert_y_axis=True,
cbar_label=None,
set_shading='auto',
**kwargs,
):
"""
This will plot a time-height cross section from 1D datasets using
nearest neighbor interpolation on a regular time by height grid.
All that is needed are a data variable and a height variable.
Parameters
----------
data_field : str
The name of the field to plot.
pres_field : str
The name of the height or pressure field to plot.
dsname : str or None
The name of the datastream to plot
subplot_index : 2-tuple
The index of the subplot to create the plot on.
set_title : str or None
The title of the plot.
day_night_background : bool
Set to true to plot the day/night background.
num_time_periods : int
Set to determine how many time periods. Setting to None
will do one time period per day.
num_y_levels : int
The number of levels in the y axis to use.
invert_y_axis : bool
Set to true to invert the y-axis (recommended for
pressure coordinates).
cbar_label : str
Option to overwrite default colorbar label.
set_shading : string
Option to set the matplotlib.pcolormesh shading parameter.
Defaults to 'auto'.
**kwargs : keyword arguments
Additional keyword arguments will be passed
into :func:`plt.pcolormesh`
Returns
-------
ax : matplotlib axis handle
The matplotlib axis handle pointing to the plot.
"""
if dsname is None and len(self._obj.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2'
'or more datasets in the TimeSeriesDisplay'
'object.'
)
elif dsname is None:
dsname = list(self._obj.keys())[0]
dim = list(self._obj[dsname][data_field].dims)
if len(dim) > 1:
raise ValueError(
'plot_time_height_xsection_from_1d_data only '
'supports 1-D datasets. For datasets with 2 or '
'more dimensions use plot().'
)
# Get data and dimensions
data = self._obj[dsname][data_field].values
xdata = self._obj[dsname][dim[0]].values
# What we will do here is do a nearest-neighbor interpolation for each
# member of the series. Coordinates are time, pressure
pres = self._obj[dsname][pres_field]
u_interp = NearestNDInterpolator((xdata, pres.values), data, rescale=True)
# Mask points where we have no data
# Count number of unique days
x_times = pd.date_range(xdata.min(), xdata.max(), periods=num_time_periods)
y_levels = np.linspace(np.nanmin(pres), np.nanmax(pres), num_y_levels)
tdata, ydata = np.meshgrid(x_times, y_levels, indexing='ij')
data = u_interp(tdata, ydata)
ytitle = ''.join(['(', pres.attrs['units'], ')'])
units = data_field + ' (' + self._obj[dsname][data_field].attrs['units'] + ')'
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
mesh = self.axes[subplot_index].pcolormesh(
x_times, y_levels, np.transpose(data), shading=set_shading, **kwargs
)
if day_night_background is True:
self.day_night_background(subplot_index=subplot_index, dsname=dsname)
# Set Title
if set_title is None:
set_title = ' '.join(
[
dsname,
'on',
dt_utils.numpy_to_arm_date(self._obj[dsname].time.values[0]),
]
)
self.axes[subplot_index].set_title(set_title)
# Set YTitle
if 'ytitle' in locals():
self.axes[subplot_index].set_ylabel(ytitle)
# Set X Limit - We want the same time axes for all subplots
time_rng = [x_times[0], x_times[-1]]
self.set_xrng(time_rng, subplot_index)
# Set Y Limit
if hasattr(self, 'yrng'):
# Make sure that the yrng is not just the default
if not np.all(self.yrng[subplot_index] == 0):
self.set_yrng(self.yrng[subplot_index], subplot_index)
else:
if ydata is None:
our_data = data.values
else:
our_data = ydata
if np.isfinite(our_data).any():
if invert_y_axis is False:
yrng = [np.nanmin(our_data), np.nanmax(our_data)]
else:
yrng = [np.nanmax(our_data), np.nanmin(our_data)]
else:
yrng = [0, 1]
self.set_yrng(yrng, subplot_index)
# Set X Format
if len(subplot_index) == 1:
days = self.xrng[subplot_index, 1] - self.xrng[subplot_index, 0]
else:
days = (
self.xrng[subplot_index[0], subplot_index[1], 1]
- self.xrng[subplot_index[0], subplot_index[1], 0]
)
# Put on an xlabel, but only if we are making the bottom-most plot
if subplot_index[0] == self.axes.shape[0] - 1:
self.axes[subplot_index].set_xlabel('Time [UTC]')
if ydata is not None:
if cbar_label is None:
self.add_colorbar(mesh, title=units, subplot_index=subplot_index)
else:
self.add_colorbar(mesh, title=cbar_label, subplot_index=subplot_index)
myFmt = common.get_date_format(days)
self.axes[subplot_index].xaxis.set_major_formatter(myFmt)
return self.axes[subplot_index]
def time_height_scatter(
self,
data_field=None,
dsname=None,
cmap='rainbow',
alt_label=None,
alt_field='alt',
cb_label=None,
**kwargs,
):
"""
Create a time series plot of altitude and data variable with
color also indicating value with a color bar. The Color bar is
positioned to serve both as the indicator of the color intensity
and the second y-axis.
Parameters
----------
data_field : str
Name of data field in the object to plot on second y-axis.
dsname : str or None
The name of the datastream to plot.
cmap : str
Colorbar color map to use.
alt_label : str
Altitude first y-axis label to use. If None, will try to use
long_name and units.
alt_field : str
Name of the altitude field in the object to plot on first y-axis.
cb_label : str
Colorbar label to use. If not set will try to use
long_name and units.
**kwargs : keyword arguments
Any other keyword arguments that will be passed
into TimeSeriesDisplay.plot module when the figure
is made.
"""
if dsname is None and len(self._obj.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._obj.keys())[0]
# Get data and dimensions
data = self._obj[dsname][data_field]
altitude = self._obj[dsname][alt_field]
dim = list(self._obj[dsname][data_field].dims)
xdata = self._obj[dsname][dim[0]]
if alt_label is None:
try:
alt_label = altitude.attrs['long_name'] + ''.join(
[' (', altitude.attrs['units'], ')']
)
except KeyError:
alt_label = alt_field
if cb_label is None:
try:
cb_label = data.attrs['long_name'] + ''.join([' (', data.attrs['units'], ')'])
except KeyError:
cb_label = data_field
colorbar_map = plt.cm.get_cmap(cmap)
self.fig.subplots_adjust(left=0.1, right=0.86, bottom=0.16, top=0.91)
ax1 = self.plot(alt_field, color='black', **kwargs)
ax1.set_ylabel(alt_label)
ax2 = ax1.twinx()
sc = ax2.scatter(xdata.values, data.values, c=data.values, marker='.', cmap=colorbar_map)
cbaxes = self.fig.add_axes(
[
self.fig.subplotpars.right + 0.02,
self.fig.subplotpars.bottom,
0.02,
self.fig.subplotpars.top - self.fig.subplotpars.bottom,
]
)
cbar = plt.colorbar(sc, cax=cbaxes)
ax2.set_ylim(cbar.mappable.get_clim())
cbar.ax.set_ylabel(cb_label)
ax2.set_yticklabels([])
return self.axes[0]
def qc_flag_block_plot(
self,
data_field=None,
dsname=None,
subplot_index=(0,),
time_rng=None,
assessment_color=None,
edgecolor='face',
set_shading='auto',
**kwargs,
):
"""
Create a time series plot of embedded quality control values
using broken barh plotting.
Parameters
----------
data_field : str
Name of data field in the object to plot corresponding quality
control.
dsname : None or str
If there is more than one datastream in the display object the
name of the datastream needs to be specified. If set to None and
there is only one datastream ACT will use the sole datastream
in the object.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
time_rng : tuple or list
List or tuple with (min, max) values to set the x-axis range limits.
assessment_color : dict
Dictionary lookup to override the default assessment-to-color mapping. Make sure
the assessment word is spelled with matching case.
edgecolor : str or list
Color name, list of color names or 'face' as defined in matplotlib.axes.Axes.broken_barh
set_shading : string
Option to set the matplotlib.pcolormesh shading parameter.
Defaults to 'auto'.
**kwargs : keyword arguments
The keyword arguments for :func:`plt.broken_barh`.
"""
# Color to plot associated with assessment.
color_lookup = {
'Bad': 'red',
'Incorrect': 'red',
'Indeterminate': 'orange',
'Suspect': 'orange',
'Missing': 'darkgray',
'Not Failing': 'green',
'Acceptable': 'green',
}
if assessment_color is not None:
for asses, color in assessment_color.items():
color_lookup[asses] = color
if asses == 'Incorrect':
color_lookup['Bad'] = color
if asses == 'Suspect':
color_lookup['Indeterminate'] = color
# Set up list of test names to use for missing values
missing_val_long_names = [
'Value equal to missing_value*',
'Value set to missing_value*',
'Value is equal to missing_value*',
'Value is set to missing_value*',
]
if dsname is None and len(self._obj.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._obj.keys())[0]
# Set up or get current plot figure
if self.fig is None:
self.fig = plt.figure()
# Set up or get current axes
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
ax = self.axes[subplot_index]
# Set X Limit - We want the same time axes for all subplots
data = self._obj[dsname][data_field]
dim = list(self._obj[dsname][data_field].dims)
xdata = self._obj[dsname][dim[0]]
# Get data and attributes
qc_data_field = self._obj[dsname].qcfilter.check_for_ancillary_qc(
data_field, add_if_missing=False, cleanup=False
)
if qc_data_field is None:
raise ValueError(f'No quality control ancillary variable in Dataset for {data_field}')
flag_masks = self._obj[dsname][qc_data_field].attrs['flag_masks']
flag_meanings = self._obj[dsname][qc_data_field].attrs['flag_meanings']
flag_assessments = self._obj[dsname][qc_data_field].attrs['flag_assessments']
# Get time ranges for green blocks
time_delta = determine_time_delta(xdata.values)
barh_list_green = reduce_time_ranges(xdata.values, time_delta=time_delta, broken_barh=True)
# Set background to gray indicating not available data
ax.set_facecolor('dimgray')
# Check if plotting 2D data vs 1D data. 2D data will be summarized by
# assessment category instead of showing each test.
data_shape = self._obj[dsname][qc_data_field].shape
if len(data_shape) > 1:
cur_assessments = list(set(flag_assessments))
cur_assessments.sort()
cur_assessments.reverse()
qc_data = np.full(data_shape, -1, dtype=np.int16)
plot_colors = []
tick_names = []
index = self._obj[dsname][qc_data_field].values == 0
if index.any():
qc_data[index] = 0
plot_colors.append(color_lookup['Not Failing'])
tick_names.append('Not Failing')
for ii, assess in enumerate(cur_assessments):
if assess not in color_lookup:
color_lookup[assess] = list(mplcolors.CSS4_COLORS.keys())[ii]
ii += 1
assess_data = self._obj[dsname].qcfilter.get_masked_data(
data_field, rm_assessments=assess
)
if assess_data.mask.any():
qc_data[assess_data.mask] = ii
plot_colors.append(color_lookup[assess])
tick_names.append(assess)
# Overwrite missing data. Not sure if we want to do this because VAPs set
# the value to missing but the test is set to Bad. This tries to overcome that
# by looking for correct test description that would only indicate the values
# are missing not that they are set to missing by a test... most likely.
missing_test_nums = []
for ii, flag_meaning in enumerate(flag_meanings):
# Check if the bit set is indicating missing data.
for val in missing_val_long_names:
if re_search(val, flag_meaning):
test_num = parse_bit(flag_masks[ii])[0]
missing_test_nums.append(test_num)
assess_data = self._obj[dsname].qcfilter.get_masked_data(
data_field, rm_tests=missing_test_nums
)
if assess_data.mask.any():
qc_data[assess_data.mask] = -1
plot_colors.append(color_lookup['Missing'])
tick_names.append('Missing')
# Create a masked array to allow not plotting where values are missing
qc_data = np.ma.masked_equal(qc_data, -1)
dims = self._obj[dsname][qc_data_field].dims
xvalues = self._obj[dsname][dims[0]].values
yvalues = self._obj[dsname][dims[1]].values
cMap = mplcolors.ListedColormap(plot_colors)
mesh = ax.pcolormesh(
xvalues,
yvalues,
np.transpose(qc_data),
cmap=cMap,
vmin=0,
shading=set_shading,
)
divider = make_axes_locatable(ax)
# Determine correct placement of words on colorbar
tick_nums = (
np.arange(0, len(tick_names) * 2 + 1) / (len(tick_names) * 2) * np.nanmax(qc_data)
)[1::2]
cax = divider.append_axes('bottom', size='5%', pad=0.3)
cbar = self.fig.colorbar(
mesh,
cax=cax,
orientation='horizontal',
spacing='uniform',
ticks=tick_nums,
shrink=0.5,
)
cbar.ax.set_xticklabels(tick_names)
# Set YTitle
dim_name = list(set(self._obj[dsname][qc_data_field].dims) - {'time'})
try:
ytitle = f"{dim_name[0]} ({self._obj[dsname][dim_name[0]].attrs['units']})"
ax.set_ylabel(ytitle)
except KeyError:
pass
# Add which tests were set as text to the plot
unique_values = []
for ii in np.unique(self._obj[dsname][qc_data_field].values):
unique_values.extend(parse_bit(ii))
if len(unique_values) > 0:
unique_values = list(set(unique_values))
unique_values.sort()
unique_values = [str(ii) for ii in unique_values]
self.fig.text(
0.5,
-0.35,
f"QC Tests Tripped: {', '.join(unique_values)}",
transform=ax.transAxes,
horizontalalignment='center',
verticalalignment='center',
fontweight='bold',
)
else:
test_nums = []
for ii, assess in enumerate(flag_assessments):
if assess not in color_lookup:
color_lookup[assess] = list(mplcolors.CSS4_COLORS.keys())[ii]
# Plot green data first.
ax.broken_barh(
barh_list_green,
(ii, ii + 1),
facecolors=color_lookup['Not Failing'],
edgecolor=edgecolor,
**kwargs,
)
# Get test number from flag_mask bitpacked number
test_nums.append(parse_bit(flag_masks[ii]))
# Get masked array data to use mask for finding if/where test is set
data = self._obj[dsname].qcfilter.get_masked_data(
data_field, rm_tests=test_nums[-1]
)
if np.any(data.mask):
# Get time ranges from time and masked data
barh_list = reduce_time_ranges(
xdata.values[data.mask], time_delta=time_delta, broken_barh=True
)
# Check if the bit set is indicating missing data. If so change
# to different plotting color than what is in flag_assessments.
for val in missing_val_long_names:
if re_search(val, flag_meanings[ii]):
assess = 'Missing'
break
# Lay down blocks of tripped tests using correct color
ax.broken_barh(
barh_list,
(ii, ii + 1),
facecolors=color_lookup[assess],
edgecolor=edgecolor,
**kwargs,
)
# Add test description to plot.
ax.text(xdata.values[0], ii + 0.5, ' ' + flag_meanings[ii], va='center')
# Change y ticks to test number
plt.yticks(
[ii + 0.5 for ii in range(0, len(test_nums))],
labels=['Test ' + str(ii[0]) for ii in test_nums],
)
# Set ylimit to number of tests plotted
ax.set_ylim(0, len(flag_assessments))
# Set X Limit - We want the same time axes for all subplots
if not hasattr(self, 'time_rng'):
if time_rng is not None:
self.time_rng = list(time_rng)
else:
self.time_rng = [xdata.min().values, xdata.max().values]
self.set_xrng(self.time_rng, subplot_index)
# Get X format - We want the same time axes for all subplots
if hasattr(self, 'time_fmt'):
ax.xaxis.set_major_formatter(self.time_fmt)
else:
# Set X Format
if len(subplot_index) == 1:
days = self.xrng[subplot_index, 1] - self.xrng[subplot_index, 0]
else:
days = (
self.xrng[subplot_index[0], subplot_index[1], 1]
- self.xrng[subplot_index[0], subplot_index[1], 0]
)
myFmt = common.get_date_format(days)
ax.xaxis.set_major_formatter(myFmt)
self.time_fmt = myFmt
return self.axes[subplot_index]
def fill_between(
self,
field,
dsname=None,
subplot_index=(0,),
set_title=None,
secondary_y=False,
**kwargs,
):
"""
Makes a fill_between plot, based on matplotlib
Parameters
----------
field : str
The name of the field to plot.
dsname : None or str
If there is more than one datastream in the display object the
name of the datastream needs to be specified. If set to None and
there is only one datastream ACT will use the sole datastream
in the object.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
set_title : str
The title for the plot.
secondary_y : boolean
Option to indicate if the data should be plotted on second y-axis.
**kwargs : keyword arguments
The keyword arguments for :func:`plt.plot` (1D timeseries) or
:func:`plt.pcolormesh` (2D timeseries).
Returns
-------
ax : matplotlib axis handle
The matplotlib axis handle of the plot.
"""
if dsname is None and len(self._obj.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._obj.keys())[0]
# Get data and dimensions
data = self._obj[dsname][field]
dim = list(self._obj[dsname][field].dims)
xdata = self._obj[dsname][dim[0]]
if 'units' in data.attrs:
ytitle = ''.join(['(', data.attrs['units'], ')'])
else:
ytitle = field
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
# Set ax to appropriate axis
if secondary_y is False:
ax = self.axes[subplot_index]
else:
ax = self.axes[subplot_index].twinx()
ax.fill_between(xdata.values, data, **kwargs)
# Set X Format
if len(subplot_index) == 1:
days = self.xrng[subplot_index, 1] - self.xrng[subplot_index, 0]
else:
days = (
self.xrng[subplot_index[0], subplot_index[1], 1]
- self.xrng[subplot_index[0], subplot_index[1], 0]
)
myFmt = common.get_date_format(days)
ax.xaxis.set_major_formatter(myFmt)
# Set X format - We want the same time axes for all subplots
if not hasattr(self, 'time_fmt'):
self.time_fmt = myFmt
# Put on an xlabel, but only if we are making the bottom-most plot
if subplot_index[0] == self.axes.shape[0] - 1:
self.axes[subplot_index].set_xlabel('Time [UTC]')
# Set YTitle
ax.set_ylabel(ytitle)
# Set Title
if set_title is None:
set_title = ' '.join(
[
dsname,
field,
'on',
dt_utils.numpy_to_arm_date(self._obj[dsname].time.values[0]),
]
)
if secondary_y is False:
ax.set_title(set_title)
return self.axes[subplot_index]
<|code_end|>
|
Additional plotting examples
Some additional needs were brought up with the ACT plotting examples. It would be useful to provide:
- Examples for working with axes returned from plotting code and modifying them after plot generation
- Examples for using the integrated xarray plotting for quick plots
|
examples/plot_examples.py
<|code_start|><|code_end|>
|
examples/plot_examples.py
<|code_start|>"""
Xarray Plotting Examples
------------------------
This is an example of how to use some different aspects
of ACT's plotting tools as well as Xarray's tools.
"""
import matplotlib.pyplot as plt
import xarray as xr
import act
# Set up plot space ahead of time
fig, ax = plt.subplots(3, figsize=(10, 7))
# Plotting up high-temporal resolution 2D data can be very slow at times.
# In order to increase the speed, the data can be resampled to a coarser
# resolution prior to plotting. Using Xarray's resample and selecting
# the nearest neighbor will greatly increase the speed.
obj = act.io.armfiles.read_netcdf(act.tests.EXAMPLE_CEIL1)
obj = obj.resample(time='1min').nearest()
# These data can be plotted up using the existing xarray functionality
# which is quick and easy
obj['backscatter'].plot(x='time', ax=ax[0])
# or using ACT
display = act.plotting.TimeSeriesDisplay(obj)
display.assign_to_figure_axis(fig, ax[1])
display.plot('backscatter')
# When using ACT, the axis object can also be manipulated using normal
# matplotlib calls for more personalized customizations
display = act.plotting.TimeSeriesDisplay(obj)
display.assign_to_figure_axis(fig, ax[2])
display.plot('backscatter')
display.axes[-1].set_ylim([0, 1500])
plt.show()
<|code_end|>
|
Improving the function of the current skew-T plot
I am working on the VAP SondePram. We'd like to display the values of CAPE/CIN/LCL/LFC/LNB on the skew-T plot, and these values should reflect the calculation results of the VAP. Could the skew-T display be improved to support this? Please let me know if you need any information.
|
examples/plotting/plot_skewt_with_text.py
<|code_start|><|code_end|>
|
examples/plotting/plot_skewt_with_text.py
<|code_start|>"""
Skew-T plot of a sounding
-------------------------
This example shows how to make a Skew-T plot from a sounding
and calculate stability indicies.
Author: Maxwell Grover
"""
import glob
import metpy
import numpy as np
import xarray as xr
from matplotlib import pyplot as plt
import act
# Make sure attributes are retained
xr.set_options(keep_attrs=True)
# Read data
file = sorted(glob.glob(act.tests.sample_files.EXAMPLE_TWP_SONDE_20060121))[-1]
sonde_ds = act.io.armfiles.read_netcdf(file)
# Calculate stability indicies
sonde_ds = act.retrievals.calculate_stability_indicies(
sonde_ds, temp_name='tdry', td_name='dp', p_name='pres', rh_name='rh'
)
# Plot the stability index values on the plot
variables = [
'lifted_index',
'surface_based_cape',
'surface_based_cin',
'most_unstable_cape',
'most_unstable_cin',
'lifted_condensation_level_temperature',
'lifted_condensation_level_pressure',
]
# Add a helper function which will format the text
def format_variable(variable, rounding_digits=2):
"""Format a sounding variable to displayed on a single line"""
return f'{variable}: {np.round(sonde_ds[variable], rounding_digits).values} {sonde_ds[variable].units}'
# Setup the plot
skewt = act.plotting.SkewTDisplay(sonde_ds, figsize=(12, 8))
# Add the stability indices
ax = skewt.axes[0]
props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
for i in range(len(variables)):
ax.text(
0.05,
(0.98 - (0.05 * i)),
format_variable(variables[i]),
transform=ax.transAxes,
fontsize=10,
verticalalignment='top',
bbox=props,
)
# Add data
skewt.plot_from_u_and_v('u_wind', 'v_wind', 'pres', 'tdry', 'dp', shade_cin=False)
sonde_ds.close()
plt.show()
<|code_end|>
|
Ability to handle time_bounds
A number of instruments have different conventions for assigning a timestamp to an averaging interval: some place the timestamp at the end of the interval and some at the start. Two examples are the ARM ECOR and EBBR systems at the SGP site, both of which include the time_bounds variable in the dataset.
It would be beneficial to have a generic function that could adjust these times to a common convention, defaulting to placing the timestamp at the start of the averaging interval. A rough sketch of what such a utility might look like is included below.
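To make the request concrete, here is a minimal sketch of what such a utility could look like. The `adjust_timestamp` name matches the entry registered in `act/utils/__init__.py` below, but the signature, keyword names, and behavior shown here are assumptions for illustration only, not the final implementation; it also assumes the time_bounds variable has already been decoded to datetime64 values.

```python
import numpy as np


def adjust_timestamp(obj, time_bounds='time_bounds', align='left', offset=None):
    """Shift the time coordinate of a dataset to a common convention.

    align='left' places the timestamp at the start of the averaging interval,
    'right' at the end and 'center' at the midpoint. If time_bounds is not
    present, a fixed offset in seconds can be supplied instead.
    """
    if time_bounds in obj and offset is None:
        # Assumes time_bounds has shape (time, 2) with decoded datetime64 values.
        bounds = obj[time_bounds].values
        if align == 'left':
            new_time = bounds[:, 0]
        elif align == 'right':
            new_time = bounds[:, 1]
        else:
            new_time = bounds[:, 0] + (bounds[:, 1] - bounds[:, 0]) / 2
    else:
        # Fall back to shifting the existing timestamps by a fixed offset.
        new_time = obj['time'].values + np.timedelta64(int(offset or 0), 's')
    obj = obj.assign_coords(time=('time', new_time))
    obj['time'].attrs['comment'] = 'Timestamp adjusted to a common averaging convention.'
    return obj
```

After reading an ECOR or EBBR file, usage would then be something like `ds = adjust_timestamp(ds)` to move the timestamps to the start of each averaging interval.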
|
act/utils/__init__.py
<|code_start|>"""
This module contains the common procedures used by all modules of the ARM
Community Toolkit.
"""
import lazy_loader as lazy
__getattr__, __dir__, __all__ = lazy.attach(
__name__,
submodules=['data_utils', 'datetime_utils', 'geo_utils', 'inst_utils', 'io_utils', 'qc_utils', 'radiance_utils', 'ship_utils'],
submod_attrs={
'data_utils': [
'ChangeUnits',
'accumulate_precip',
'add_in_nan',
'assign_coordinates',
'convert_units',
'create_pyart_obj',
'get_missing_value',
'ts_weighted_average',
'height_adjusted_pressure',
'height_adjusted_temperature',
'convert_to_potential_temp',
],
'datetime_utils': [
'dates_between',
'datetime64_to_datetime',
'determine_time_delta',
'numpy_to_arm_date',
'reduce_time_ranges',
'date_parser',
],
'geo_utils': [
'add_solar_variable',
'destination_azimuth_distance',
'get_solar_azimuth_elevation',
'get_sunrise_sunset_noon',
'is_sun_visible',
],
'inst_utils': ['decode_present_weather'],
'qc_utils': ['calculate_dqr_times'],
'radiance_utils': ['planck_converter'],
'ship_utils': ['calc_cog_sog', 'proc_scog'],
'io_utils': ['pack_tar',
'unpack_tar',
'cleanup_files',
'is_gunzip_file',
'pack_gzip',
'unpack_gzip'
],
},
)
<|code_end|>
act/utils/datetime_utils.py
<|code_start|>"""
Module containing utilities involving datetimes.
"""
import datetime as dt
import warnings
import numpy as np
import pandas as pd
from scipy import stats
def dates_between(sdate, edate):
"""
This procedure returns all of the dates between *sdate* and *edate*.
Parameters
----------
sdate : str
The string containing the start date. The string is formatted
YYYYMMDD.
edate : str
The string containing the end date. The string is formatted
YYYYMMDD.
Returns
-------
all_dates : array of datetimes
The array containing the dates between *sdate* and *edate*.
"""
days = dt.datetime.strptime(edate, '%Y%m%d') - dt.datetime.strptime(sdate, '%Y%m%d')
all_dates = [
dt.datetime.strptime(sdate, '%Y%m%d') + dt.timedelta(days=d) for d in range(days.days + 1)
]
return all_dates
def numpy_to_arm_date(_date, returnTime=False):
"""
Given a numpy datetime64, return an ARM standard date (yyyymmdd).
Parameters
----------
date : numpy.datetime64
Numpy datetime64 date.
returnTime : boolean
If set to true, returns time instead of date
Returns
-------
arm_date : string or None
Returns an arm date.
"""
from dateutil.parser._parser import ParserError
try:
date = pd.to_datetime(str(_date))
if returnTime is False:
date = date.strftime('%Y%m%d')
else:
date = date.strftime('%H%M%S')
except ParserError:
date = None
return date
def reduce_time_ranges(time, time_delta=60, broken_barh=False):
"""
Given a time series, this function will return a list of tuples of time
ranges representing the continuous periods where no missing data is detected.
Parameters
----------
time : numpy datetime64 array
The numpy array of date time values.
time_delta : int
The number of seconds to use as default time step in time array.
broken_barh : boolean
Option to return start time and duration instead of start time and
end time. This is used with the pyplot.broken_barh() plotting routine.
Returns
-------
time_ranges : list of tuples with 2 numpy datetime64 times
The time range(s) of continuous data.
"""
# Convert integer sections to numpy datetime64
time_delta = np.timedelta64(int(time_delta * 1000), 'ms')
# Make a difference array to find where time difference is greater than time_delta
diff = np.diff(time)
dd = np.where(diff > time_delta)[0]
if len(dd) == 0:
return [(time[0], time[-1] - time[0])]
# Add to the start and end of the array for beginning and end values
dd = np.insert(dd, 0, -1)
dd = np.append(dd, len(time) - 1)
# Create a list of tuples containing time ranges or start time with duration
if broken_barh:
return [
(time[dd[ii] + 1], time[dd[ii + 1]] - time[dd[ii] + 1]) for ii in range(len(dd) - 1)
]
else:
return [(time[dd[ii] + 1], time[dd[ii + 1]]) for ii in range(len(dd) - 1)]
def determine_time_delta(time, default=60):
"""
Returns the most likely time step in seconds by analyzing the difference
in time steps.
Parameters
----------
time : numpy datetime64 array
The numpy array of date time values.
default : int or float
The default number to return if unable to calculate a value.
Returns
-------
time_delta : float
Returns the number of seconds for the most common time step. If can't
calculate a value the default value is returned.
"""
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=RuntimeWarning)
if time.size > 1:
mode = stats.mode(np.diff(time))
time_delta = mode.mode[0]
time_delta = time_delta.astype('timedelta64[s]').astype(float)
else:
time_delta = default
return float(time_delta)
def datetime64_to_datetime(time):
"""
Given a numpy datetime64 array time series, return datetime
(y, m, d, h, m, s)
Parameters
----------
time : numpy datetime64 array, list of numpy datetime64 values, or scalar numpy datetime64
The numpy array of date time values.
Returns
-------
datetime : list
Returns a list of datetimes (y, m, d, h, m, s) from a time series.
"""
if isinstance(time, (tuple, list)):
time = np.array(time)
if len(time.shape) == 0:
time = np.array([time])
datetime_array = [
dt.datetime.fromtimestamp(
tm.astype('datetime64[ms]').astype('float') / 1000.0, tz=dt.timezone.utc
).replace(tzinfo=None)
for tm in time
]
return datetime_array
def date_parser(date_string, output_format='%Y%m%d', return_datetime=False):
"""Converts one datetime string to another or to
a datetime object.
Parameters
----------
date_string : str
datetime string to be parsed. Accepted formats are
YYYY-MM-DD, DD.MM.YYYY, DD/MM/YYYY or YYYYMMDD.
output_format : str
Format for datetime.strftime to output datetime string.
return_datetime : bool
If true, returns str as a datetime object.
Default is False.
Returns
-------
datetime_str : str
A valid datetime string.
datetime_obj : datetime.datetime
A datetime object.
"""
date_fmts = [
'%Y-%m-%d',
'%d.%m.%Y',
'%d/%m/%Y',
'%Y%m%d',
'%Y/%m/%d',
'%Y-%m-%dT%H:%M:%S',
'%d.%m.%YT%H:%M:%S',
'%d/%m/%YT%H:%M:%S',
'%Y%m%dT%H:%M:%S',
'%Y/%m/%dT%H:%M:%S',
]
for fmt in date_fmts:
try:
datetime_obj = dt.datetime.strptime(date_string, fmt)
if return_datetime:
return datetime_obj
else:
return datetime_obj.strftime(output_format)
except ValueError:
pass
fmt_strings = ', '.join(date_fmts)
raise ValueError('Invalid Date format, please use one of these formats ' + fmt_strings)
<|code_end|>
examples/workflows/plot_merged_product.py
<|code_start|><|code_end|>
|
act/utils/__init__.py
<|code_start|>"""
This module contains the common procedures used by all modules of the ARM
Community Toolkit.
"""
import lazy_loader as lazy
__getattr__, __dir__, __all__ = lazy.attach(
__name__,
submodules=['data_utils', 'datetime_utils', 'geo_utils', 'inst_utils', 'io_utils', 'qc_utils', 'radiance_utils', 'ship_utils'],
submod_attrs={
'data_utils': [
'ChangeUnits',
'accumulate_precip',
'add_in_nan',
'assign_coordinates',
'convert_units',
'create_pyart_obj',
'get_missing_value',
'ts_weighted_average',
'height_adjusted_pressure',
'height_adjusted_temperature',
'convert_to_potential_temp',
],
'datetime_utils': [
'dates_between',
'datetime64_to_datetime',
'determine_time_delta',
'numpy_to_arm_date',
'reduce_time_ranges',
'date_parser',
'adjust_timestamp'
],
'geo_utils': [
'add_solar_variable',
'destination_azimuth_distance',
'get_solar_azimuth_elevation',
'get_sunrise_sunset_noon',
'is_sun_visible',
],
'inst_utils': ['decode_present_weather'],
'qc_utils': ['calculate_dqr_times'],
'radiance_utils': ['planck_converter'],
'ship_utils': ['calc_cog_sog', 'proc_scog'],
'io_utils': ['pack_tar',
'unpack_tar',
'cleanup_files',
'is_gunzip_file',
'pack_gzip',
'unpack_gzip'
],
},
)
<|code_end|>
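The lazy_loader attach above only imports a submodule the first time one of its attached names is accessed. A minimal sketch of the resulting behaviour, assuming ACT is installed and the top-level package exposes `utils` the same way:

```
import act.utils

# act.utils.datetime_utils has not been imported yet at this point; the
# attribute access below triggers the lazy import and then resolves normally.
dates = act.utils.dates_between('20230101', '20230103')
print(dates)
# [datetime(2023, 1, 1, 0, 0), datetime(2023, 1, 2, 0, 0), datetime(2023, 1, 3, 0, 0)]
```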
act/utils/datetime_utils.py
<|code_start|>"""
Module that containing utilities involving datetimes.
"""
import datetime as dt
import warnings
import numpy as np
import pandas as pd
from scipy import stats
def dates_between(sdate, edate):
"""
This procedure returns all of the dates between *sdate* and *edate*.
Parameters
----------
sdate : str
The string containing the start date. The string is formatted
YYYYMMDD.
edate : str
The string containing the end date. The string is formatted
YYYYMMDD.
Returns
-------
all_dates : array of datetimes
The array containing the dates between *sdate* and *edate*.
"""
days = dt.datetime.strptime(edate, '%Y%m%d') - dt.datetime.strptime(sdate, '%Y%m%d')
all_dates = [
dt.datetime.strptime(sdate, '%Y%m%d') + dt.timedelta(days=d) for d in range(days.days + 1)
]
return all_dates
def numpy_to_arm_date(_date, returnTime=False):
"""
Given a numpy datetime64, return an ARM standard date (yyyymmdd).
Parameters
----------
date : numpy.datetime64
Numpy datetime64 date.
returnTime : boolean
If set to true, returns time instead of date
Returns
-------
arm_date : string or None
Returns an arm date.
"""
from dateutil.parser._parser import ParserError
try:
date = pd.to_datetime(str(_date))
if returnTime is False:
date = date.strftime('%Y%m%d')
else:
date = date.strftime('%H%M%S')
except ParserError:
date = None
return date
def reduce_time_ranges(time, time_delta=60, broken_barh=False):
"""
Given a time series, this function will return a list of tuples of time
ranges representing the continuous times where no data is detected as missing.
Parameters
----------
time : numpy datetime64 array
The numpy array of date time values.
time_delta : int
The number of seconds to use as default time step in time array.
broken_barh : boolean
Option to return start time and duration instead of start time and
end time. This is used with the pyplot.broken_barh() plotting routine.
Returns
-------
time_ranges : list of tuples with 2 numpy datetime64 times
The time range(s) of continuous data.
"""
# Convert integer sections to numpy datetime64
time_delta = np.timedelta64(int(time_delta * 1000), 'ms')
# Make a difference array to find where time difference is greater than time_delta
diff = np.diff(time)
dd = np.where(diff > time_delta)[0]
if len(dd) == 0:
return [(time[0], time[-1] - time[0])]
# Add to start and end of array for beginning and end values
dd = np.insert(dd, 0, -1)
dd = np.append(dd, len(time) - 1)
# Create a list of tuples containing time ranges or start time with duration
if broken_barh:
return [
(time[dd[ii] + 1], time[dd[ii + 1]] - time[dd[ii] + 1]) for ii in range(len(dd) - 1)
]
else:
return [(time[dd[ii] + 1], time[dd[ii + 1]]) for ii in range(len(dd) - 1)]
def determine_time_delta(time, default=60):
"""
Returns the most likely time step in seconds by analyzing the difference
in time steps.
Parameters
----------
time : numpy datetime64 array
The numpy array of date time values.
default : int or float
The default number to return if unable to calculate a value.
Returns
-------
time_delta : float
Returns the number of seconds for the most common time step. If a value
cannot be calculated, the default value is returned.
"""
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=RuntimeWarning)
if time.size > 1:
mode = stats.mode(np.diff(time))
time_delta = mode.mode[0]
time_delta = time_delta.astype('timedelta64[s]').astype(float)
else:
time_delta = default
return float(time_delta)
def datetime64_to_datetime(time):
"""
Given a numpy datetime64 array time series, return datetime
(y, m, d, h, m, s)
Parameters
----------
time : numpy datetime64 array, list of numpy datetime64 values or
scalar numpy datetime64. The numpy array of date time values.
Returns
-------
datetime : list
Returns a list of datetimes (y, m, d, h, m, s) from a time series.
"""
if isinstance(time, (tuple, list)):
time = np.array(time)
if len(time.shape) == 0:
time = np.array([time])
datetime_array = [
dt.datetime.fromtimestamp(
tm.astype('datetime64[ms]').astype('float') / 1000.0, tz=dt.timezone.utc
).replace(tzinfo=None)
for tm in time
]
return datetime_array
def date_parser(date_string, output_format='%Y%m%d', return_datetime=False):
"""Converts one datetime string to another or to
a datetime object.
Parameters
----------
date_string : str
datetime string to be parsed. Accepted formats are
YYYY-MM-DD, DD.MM.YYYY, DD/MM/YYYY or YYYYMMDD.
output_format : str
Format for datetime.strftime to output datetime string.
return_datetime : bool
If true, returns str as a datetime object.
Default is False.
Returns
-------
datetime_str : str
A valid datetime string.
datetime_obj : datetime.datetime
A datetime object.
"""
date_fmts = [
'%Y-%m-%d',
'%d.%m.%Y',
'%d/%m/%Y',
'%Y%m%d',
'%Y/%m/%d',
'%Y-%m-%dT%H:%M:%S',
'%d.%m.%YT%H:%M:%S',
'%d/%m/%YT%H:%M:%S',
'%Y%m%dT%H:%M:%S',
'%Y/%m/%dT%H:%M:%S',
]
for fmt in date_fmts:
try:
datetime_obj = dt.datetime.strptime(date_string, fmt)
if return_datetime:
return datetime_obj
else:
return datetime_obj.strftime(output_format)
except ValueError:
pass
fmt_strings = ', '.join(date_fmts)
raise ValueError('Invalid Date format, please use one of these formats ' + fmt_strings)
def adjust_timestamp(ds, time_bounds='time_bounds', align='left', offset=None):
"""
Will adjust the timestamp based on the time_bounds or other information
so that the timestamp aligns with user preference.
The adjustment uses the time_bounds variable when available; otherwise
it relies on the user-supplied offset.
Parameters
----------
ds : Xarray Dataset
Dataset to adjust
time_bounds : str
Name of the time_bounds variable
align : str
Alignment of the time when using time_bounds.
left: Sets timestamp to start of sample interval
right: Sets timestamp to end of sample interval
center: Sets timestamp to middle of sample interval
offset : int
Time in seconds to offset the timestamp. This overrides
the time_bounds variable and can be positive or negative.
Required to be in seconds
Returns
-------
ds : Xarray DataSet
Adjusted DataSet
"""
if time_bounds in ds and offset is None:
time_bounds = ds[time_bounds].values
if align == 'left':
time_start = [np.datetime64(t[0]) for t in time_bounds]
elif align == 'right':
time_start = [np.datetime64(t[1]) for t in time_bounds]
elif align == 'center':
time_start = [np.datetime64(t[0]) + (np.datetime64(t[1]) - np.datetime64(t[0])) / 2. for t in time_bounds]
else:
raise ValueError('Align should be set to one of [left, right, center]')
elif offset is not None:
time = ds['time'].values
time_start = [t + np.timedelta64(offset, 's') for t in time]
else:
raise ValueError('time_bounds variable is not available')
ds = ds.assign_coords({'time': time_start})
return ds
<|code_end|>
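A minimal sketch of the new `adjust_timestamp` on a synthetic dataset; the variable values and times below are made up purely for illustration:

```
import numpy as np
import xarray as xr
from act.utils.datetime_utils import adjust_timestamp

# Two 30-minute samples stamped at the start of each sampling interval.
time = np.array(['2023-01-01T00:00', '2023-01-01T00:30'], dtype='datetime64[ns]')
bounds = np.stack([time, time + np.timedelta64(30, 'm')], axis=1)
ds = xr.Dataset({'time_bounds': (('time', 'bound'), bounds)}, coords={'time': time})

# Move each timestamp to the end of its sampling interval using time_bounds.
ds_right = adjust_timestamp(ds, align='right')
print(ds_right['time'].values)  # 00:30 and 01:00
```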
examples/workflows/plot_merged_product.py
<|code_start|>"""
Merge multiple datasets
-----------------------
Example to merge multiple data products into one using ACT.
Shows how to adjust the timestamp if the timestamps are at
different parts of the sample interval (left, right, center).
Also shows how to apply QC information, merge and resample
data using xarray and plot/write out the results.
"""
import act
import xarray as xr
import matplotlib.pyplot as plt
# Set data files
# An alternative to this is to download data from the
# ARM Data Webservice as shown in the discovery plot_neon.py example
ebbr_file = act.tests.sample_files.EXAMPLE_EBBR3
ecor_file = act.tests.sample_files.EXAMPLE_ECOR
sebs_file = act.tests.sample_files.EXAMPLE_SEBS
# Read data into datasets
ds_ebbr = act.io.armfiles.read_netcdf(ebbr_file)
ds_ecor = act.io.armfiles.read_netcdf(ecor_file)
ds_sebs = act.io.armfiles.read_netcdf(sebs_file)
# Check for ARM DQRs and add them to the QC variables
ds_ebbr = act.qc.arm.add_dqr_to_qc(ds_ebbr)
ds_ecor = act.qc.arm.add_dqr_to_qc(ds_ecor)
ds_sebs = act.qc.arm.add_dqr_to_qc(ds_sebs)
# The ECOR and EBBR have different definitions of latent heat
# flux and what is positive vs negative. Check out the ARM
# Handbooks for more information
ds_ecor['lv_e'].values = ds_ecor['lv_e'].values * -1.
# For example purposes, let's rename the ecor latent heat flux
ds_ecor = ds_ecor.rename({'lv_e': 'latent_heat_flux_ecor'})
ds_ecor['latent_heat_flux_ecor'].attrs['ancillary_variables'] = 'qc_latent_heat_flux_ecor'
ds_ecor = ds_ecor.rename({'qc_lv_e': 'qc_latent_heat_flux_ecor'})
# Also going to switch some QC values for example purposes
qc = ds_ecor['qc_latent_heat_flux_ecor'].values
qc[10:20] = 2
ds_ecor['qc_latent_heat_flux_ecor'].values = qc
# There is a difference in how these timestamps are defined
# The EBBR is at the end of the sampling interval and the
# ECOR is at the beginning. Knowing this, we can shift the
# EBBR timestamps by 30 minutes to coincide with the ECOR
ds_ebbr = act.utils.datetime_utils.adjust_timestamp(ds_ebbr, offset=-30 * 60)
# Now, we can merge all these objects into one product
ds = xr.merge([ds_ecor, ds_ebbr, ds_sebs], compat='override')
# Apply the QC information to set all flagged data to missing/NaN
ds.qcfilter.datafilter(del_qc_var=False, rm_assessments=['Bad', 'Incorrect', 'Indeterminate', 'Suspect'])
# Plot up data from the merged object for each of the instruments
display = act.plotting.TimeSeriesDisplay(ds, figsize=(15, 10), subplot_shape=(3,))
display.plot('latent_heat_flux_ecor', label='ECOR', subplot_index=(0,))
display.plot('latent_heat_flux', label='EBBR', subplot_index=(0,))
plt.legend()
display.plot('surface_soil_heat_flux_1', label='SEBS', subplot_index=(1,))
# Plot out the QC information that was modified as well
display.qc_flag_block_plot('latent_heat_flux_ecor', subplot_index=(2,))
plt.show()
# Resample the data to 1 hour mean
# Check out the xarray documentation for more information
# on the resample function. Options include mean, median,
# max, min, sum, nearest, and more.
ds = ds.resample(time='H').mean(keep_attrs=True)
# Plot up data from the hourly merged object for ECOR and EBBR
display = act.plotting.TimeSeriesDisplay(ds, figsize=(15, 10), subplot_shape=(1,))
display.plot('latent_heat_flux_ecor', label='ECOR', subplot_index=(0,))
display.plot('latent_heat_flux', label='EBBR', subplot_index=(0,))
plt.show()
# Write data out to netcdf
ds.to_netcdf('./sgpecor_ebbr_sebs.nc')
<|code_end|>
|
Add ARM Data Surveyor Command Line Tool
### Description
Migrate functionality from the ARM Data Surveyor
https://code.arm.gov/dq/ADS
to ACT
|
scripts/ads.py
<|code_start|><|code_end|>
setup.py
<|code_start|>from os import path
from setuptools import setup, find_packages
import sys
import versioneer
# NOTE: This file must remain Python 2 compatible for the foreseeable future,
# to ensure that we error out properly for people with outdated setuptools
# and/or pip.
min_version = (3, 6)
if sys.version_info < min_version:
error = """
act does not support Python {}.{}.
Python {}.{} and above is required. Check your Python version like so:
python3 --version
This may be due to an out-of-date pip. Make sure you have pip >= 9.0.1.
Upgrade pip like so:
pip install --upgrade pip
""".format(
*sys.version_info[:2], *min_version
)
sys.exit(error)
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.rst'), encoding='utf-8') as readme_file:
readme = readme_file.read()
with open(path.join(here, 'requirements.txt')) as requirements_file:
# Parse requirements.txt, ignoring any commented-out lines.
requirements = [
line for line in requirements_file.read().splitlines() if not line.startswith('#')
]
setup(
name='act-atmos',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
description='Package for working with atmospheric time series datasets',
long_description=readme,
long_description_content_type='text/x-rst',
author='Adam Theisen',
author_email='[email protected]',
url='https://github.com/ARM-DOE/ACT',
packages=find_packages(exclude=['docs']),
entry_points={'console_scripts': []},
include_package_data=True,
package_data={'act': []},
install_requires=requirements,
license='BSD (3-clause)',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Natural Language :: English',
'Programming Language :: Python :: 3',
],
)
<|code_end|>
|
scripts/ads.py
<|code_start|>"""
ARM Data Surveyor (ADS)
Command line wrapper around ACT. Not all
features of ACT are included as options in ADS.
Please see the examples.txt for examples on how
to use ADS.
Author: Jason Hemedinger
"""
import argparse
import re
import json
import glob
import ast
import pathlib
import matplotlib.pyplot as plt
import numpy as np
import act
try:
import cartopy.crs as ccrs
CARTOPY_AVAILABLE = True
except ImportError:
CARTOPY_AVAILABLE = False
def option_error_check(args, error_fields, check_all=False):
'''
This will check the args object for the given keys to see if they are set.
With check_all False, an error message is printed and the program exits when
none of the keys are set; with check_all True, all of the keys must be set.
'''
if not isinstance(error_fields, (list, tuple)):
error_fields = [error_fields]
print_error = False
how_many = 'one'
if check_all is False and not any([vars(args)[ii] for ii in error_fields]):
print_error = True
if check_all is True and not all([vars(args)[ii] for ii in error_fields]):
print_error = True
how_many = 'all'
if print_error:
prepend = '--'
for ii, value in enumerate(error_fields):
if not value.startswith(prepend):
error_fields[ii] = prepend + value
print(f"\n{pathlib.Path(__file__).name}: error: {how_many} of the arguments "
f"{' '.join(error_fields)} is requried\n")
exit()
def find_drop_vars(args):
'''
This will check if more than one file is to be read. If so, read one file
and build a list of variables to not read, based on the field arguments and
the corresponding QC or dimension variables. This significantly speeds up
reading when many files are involved.
'''
files = glob.glob(args.file_path)
drop_vars = []
if len(files) > 1:
ds = act.io.armfiles.read_netcdf(files[0])
ds.clean.cleanup()
drop_vars = set(ds.data_vars)
keep_vars = ['latitude', 'longitude']
if args.field is not None:
keep_vars.append(args.field)
if args.fields is not None:
keep_vars.extend(set(args.fields))
if args.wind_fields is not None:
keep_vars.extend(set(args.wind_fields))
if args.station_fields is not None:
keep_vars.extend(set(args.station_fields))
if args.latitude is not None:
keep_vars.append(args.latitude)
if args.longitude is not None:
keep_vars.append(args.longitude)
if args.x_field is not None:
keep_vars.append(args.x_field)
if args.y_field is not None:
keep_vars.append(args.y_field)
if args.u_wind is not None:
keep_vars.append(args.u_wind)
if args.v_wind is not None:
keep_vars.append(args.v_wind)
if args.p_field is not None:
keep_vars.append(args.p_field)
if args.t_field is not None:
keep_vars.append(args.t_field)
if args.td_field is not None:
keep_vars.append(args.td_field)
if args.spd_field is not None:
keep_vars.append(args.spd_field)
if args.dir_field is not None:
keep_vars.append(args.dir_field)
keep_vars_additional = []
for var_name in keep_vars:
qc_var_name = ds.qcfilter.check_for_ancillary_qc(
var_name, add_if_missing=False, cleanup=False)
if qc_var_name is not None:
keep_vars_additional.append(qc_var_name)
try:
keep_vars_additional.extend(ds[var_name].dims)
except KeyError:
pass
drop_vars = drop_vars - set(keep_vars) - set(keep_vars_additional)
return drop_vars
def geodisplay(args):
ds = act.io.armfiles.read_netcdf(args.file_path)
dsname = args.dsname
if dsname == _default_dsname:
try:
dsname = ds.attrs['datastream']
except KeyError:
pass
display = act.plotting.GeographicPlotDisplay({dsname: ds},
figsize=args.figsize)
display.geoplot(data_field=args.field, lat_field=args.latitude,
lon_field=args.longitude, dsname=dsname,
cbar_label=args.cb_label, title=args.set_title,
plot_buffer=args.plot_buffer, stamen=args.stamen,
tile=args.tile, cartopy_feature=args.cfeatures,
cmap=args.cmap, text=args.text, gridlines=args.gridlines,
projection=args.projection, **args.kwargs)
plt.savefig(args.out_path)
plt.show()
plt.close(display.fig)
ds.close()
def skewt(args):
ds = act.io.armfiles.read_netcdf(args.file_path)
subplot_index = args.subplot_index
dsname = args.dsname
if dsname == _default_dsname:
try:
dsname = ds.attrs['datastream']
except KeyError:
pass
display = act.plotting.SkewTDisplay({dsname: ds}, figsize=args.figsize)
if args.from_u_and_v:
display.plot_from_u_and_v(u_field=args.u_wind, v_field=args.v_wind,
p_field=args.p_field, t_field=args.t_field,
td_field=args.td_field,
subplot_index=subplot_index,
dsname=dsname, show_parcel=args.show_parcel,
p_levels_to_plot=args.plevels_plot,
shade_cape=args.shade_cape,
shade_cin=args.shade_cin,
set_title=args.set_title,
plot_barbs_kwargs=args.plot_barbs_kwargs,
plot_kwargs=args.plot_kwargs)
if args.from_spd_and_dir:
display.plot_from_spd_and_dir(spd_field=args.spd_field,
dir_field=args.dir_field,
p_field=args.p_field,
t_field=args.t_field,
td_field=args.td_field,
dsname=dsname,
**args.kwargs)
plt.savefig(args.out_path)
plt.show()
plt.close(display.fig)
ds.close()
def xsection(args):
ds = act.io.armfiles.read_netcdf(args.file_path)
subplot_index = args.subplot_index
dsname = args.dsname
if dsname == _default_dsname:
try:
dsname = ds.attrs['datastream']
except KeyError:
pass
display = act.plotting.XSectionDisplay({dsname: ds}, figsize=args.figsize)
if args.plot_xsection:
display.plot_xsection(dsname=dsname, varname=args.field,
x=args.x_field, y=args.y_field,
subplot_index=subplot_index,
sel_kwargs=args.sel_kwargs,
isel_kwargs=args.isel_kwargs, **args.kwargs)
if args.xsection_map:
display.plot_xsection_map(dsname=dsname, varname=args.field,
subplot_index=subplot_index,
coastlines=args.coastlines,
background=args.background,
**args.kwargs)
plt.savefig(args.out_path)
plt.show()
plt.close(display.fig)
ds.close()
def wind_rose(args):
drop_vars = find_drop_vars(args)
ds = act.io.armfiles.read_netcdf(args.file_path, drop_variables=drop_vars)
subplot_index = args.subplot_index
dsname = args.dsname
if dsname == _default_dsname:
try:
dsname = ds.attrs['datastream']
except KeyError:
pass
display = act.plotting.WindRoseDisplay({dsname: ds},
figsize=args.figsize)
display.plot(dir_field=args.dir_field, spd_field=args.spd_field,
subplot_index=subplot_index,
dsname=dsname, cmap=args.cmap,
set_title=args.set_title,
num_dirs=args.num_dir, spd_bins=args.spd_bins,
tick_interval=args.tick_interval, **args.kwargs)
plt.savefig(args.out_path)
plt.show()
plt.close(display.fig)
ds.close()
def timeseries(args):
drop_vars = find_drop_vars(args)
ds = act.io.armfiles.read_netcdf(args.file_path, drop_variables=drop_vars)
if args.cleanup:
ds.clean.cleanup()
subplot_shape = args.subplot_shape
subplot_index = args.subplot_index
dsname = args.dsname
if dsname == _default_dsname:
try:
dsname = ds.attrs['datastream']
except KeyError:
pass
display = act.plotting.TimeSeriesDisplay(
{dsname: ds}, figsize=args.figsize,
subplot_shape=subplot_shape)
options = ['plot', 'barbs_spd_dir', 'barbs_u_v', 'xsection_from_1d',
'time_height_scatter', 'qc', 'fill_between', 'multi_panel']
option_error_check(args, options)
if args.plot:
option_error_check(args, 'field')
if args.set_yrange is not None:
yrange = list(map(float, args.set_yrange))
else:
yrange = args.set_yrange
display.plot(
field=args.field, dsname=dsname, cmap=args.cmap,
set_title=args.set_title, add_nan=args.add_nan,
subplot_index=subplot_index,
use_var_for_y=args.var_y,
day_night_background=args.day_night,
invert_y_axis=args.invert_y_axis,
abs_limits=args.abs_limits, time_rng=args.time_rng,
assessment_overplot=args.assessment_overplot,
assessment_overplot_category=args.overplot_category,
assessment_overplot_category_color=args.category_color,
force_line_plot=args.force_line_plot, labels=args.labels,
cbar_label=args.cb_label, secondary_y=args.secondary_y,
y_rng=yrange,
**args.kwargs)
if args.barbs_spd_dir:
display.plot_barbs_from_spd_dir(
dir_field=args.dir_field,
spd_field=args.spd_field,
pres_field=args.p_field,
dsname=dsname,
**args.kwargs)
if args.barbs_u_v:
display.plot_barbs_from_u_v(
u_field=args.u_wind, v_field=args.v_wind,
pres_field=args.p_field, dsname=dsname,
set_title=args.set_title,
invert_y_axis=args.invert_y_axis,
day_night_background=args.day_night,
num_barbs_x=args.num_barb_x,
num_barbs_y=args.num_barb_y,
use_var_for_y=args.var_y,
subplot_index=subplot_index,
**args.kwargs)
if args.xsection_from_1d:
option_error_check(args, 'field')
display.plot_time_height_xsection_from_1d_data(
data_field=args.field, pres_field=args.p_field,
dsname=dsname, set_title=args.set_title,
day_night_background=args.day_night,
num_time_periods=args.num_time_periods,
num_y_levels=args.num_y_levels,
invert_y_axis=args.invert_y_axis,
subplot_index=subplot_index,
cbar_label=args.cb_label,
**args.kwargs)
if args.time_height_scatter:
option_error_check(args, 'field')
display.time_height_scatter(
data_field=args.field, dsname=dsname,
cmap=args.cmap, alt_label=args.alt_label,
alt_field=args.alt_field, cb_label=args.cb_label,
**args.kwargs)
if args.qc:
option_error_check(args, 'field')
display.qc_flag_block_plot(
data_field=args.field, dsname=dsname,
subplot_index=subplot_index,
time_rng=args.time_rng,
assessment_color=args.assessment_color,
**args.kwargs)
if args.fill_between:
option_error_check(args, 'field')
display.fill_between(
field=args.field, dsname=dsname,
subplot_index=subplot_index,
set_title=args.set_title,
secondary_y=args.secondary_y,
**args.kwargs)
if args.multi_panel:
option_error_check(args, ['fields', 'plot_type'], check_all=True)
for i, j, k in zip(args.fields, subplot_index, args.plot_type):
if k == 'plot':
display.plot(
field=i, dsname=dsname, cmap=args.cmap,
set_title=args.set_title, add_nan=args.add_nan,
subplot_index=j,
use_var_for_y=args.var_y,
day_night_background=args.day_night,
invert_y_axis=args.invert_y_axis,
abs_limits=args.abs_limits, time_rng=args.time_rng,
assessment_overplot=args.assessment_overplot,
assessment_overplot_category=args.overplot_category,
assessment_overplot_category_color=args.category_color,
force_line_plot=args.force_line_plot, labels=args.labels,
cbar_label=args.cb_label, secondary_y=args.secondary_y,
**args.kwargs)
if k == 'qc':
display.qc_flag_block_plot(
data_field=i, dsname=dsname,
subplot_index=j,
time_rng=args.time_rng,
assessment_color=args.assessment_color,
**args.kwargs)
plt.savefig(args.out_path)
plt.show()
plt.close(display.fig)
ds.close()
def histogram(args):
drop_vars = find_drop_vars(args)
ds = act.io.armfiles.read_netcdf(args.file_path, drop_variables=drop_vars)
subplot_shape = args.subplot_shape
subplot_index = args.subplot_index
dsname = args.dsname
if dsname == _default_dsname:
try:
dsname = ds.attrs['datastream']
except KeyError:
pass
display = act.plotting.HistogramDisplay(
{dsname: ds}, figsize=args.figsize,
subplot_shape=subplot_shape)
if args.stacked_bar_graph:
display.plot_stacked_bar_graph(
field=args.field, dsname=dsname,
bins=args.bins, density=args.density,
sortby_field=args.sortby_field,
sortby_bins=args.sortby_bins,
set_title=args.set_title,
subplot_index=subplot_index,
**args.kwargs)
if args.size_dist:
display.plot_size_distribution(
field=args.field, bins=args.bin_field,
time=args.time, dsname=dsname,
set_title=args.set_title,
subplot_index=subplot_index,
**args.kwargs)
if args.stairstep:
display.plot_stairstep_graph(
field=args.field, dsname=dsname,
bins=args.bins, density=args.density,
sortby_field=args.sortby_field,
sortby_bins=args.sortby_bins,
set_title=args.set_title,
subplot_index=subplot_index,
**args.kwargs)
if args.heatmap:
display.plot_heatmap(
x_field=args.x_field, y_field=args.y_field,
dsname=dsname, x_bins=args.x_bins,
y_bins=args.y_bins, set_title=args.set_title,
density=args.density,
subplot_index=subplot_index, **args.kwargs)
plt.savefig(args.out_path)
plt.show()
plt.close(display.fig)
ds.close()
def contour(args):
files = glob.glob(args.file_path)
files.sort()
time = args.time
data = {}
fields = {}
wind_fields = {}
station_fields = {}
for f in files:
ds = act.io.armfiles.read_netcdf(f)
data.update({f: ds})
fields.update({f: args.fields})
wind_fields.update({f: args.wind_fields})
station_fields.update({f: args.station_fields})
display = act.plotting.ContourDisplay(data, figsize=args.figsize)
if args.create_contour:
display.create_contour(fields=fields, time=time, function=args.function,
grid_delta=args.grid_delta,
grid_buffer=args.grid_buffer,
subplot_index=args.subplot_index,
**args.kwargs)
if args.contourf:
display.contourf(x=args.x, y=args.y, z=args.z,
subplot_index=args.subplot_index,
**args.kwargs)
if args.plot_contour:
display.contour(x=args.x, y=args.y, z=args.z,
subplot_index=args.subplot_index,
**args.kwargs)
if args.vectors_spd_dir:
display.plot_vectors_from_spd_dir(fields=wind_fields, time=time,
mesh=args.mesh, function=args.function,
grid_delta=args.grid_delta,
grid_buffer=args.grid_buffer,
subplot_index=args.subplot_index,
**args.kwargs)
if args.barbs:
display.barbs(x=args.x, y=args.y, u=args.u, v=args.v,
subplot_index=args.subplot_index,
**args.kwargs)
if args.plot_station:
display.plot_station(fields=station_fields, time=time,
text_color=args.text_color,
subplot_index=args.subplot_index,
**args.kwargs)
plt.savefig(args.out_path)
plt.show()
plt.close(display.fig)
ds.close()
# Define new function for argparse to allow specific rules for
# parsing files containing arguments. This works by this function being
# called for each line in the configuration file.
def convert_arg_line_to_args(line):
for arg in line.split():
if not arg.strip(): # If empty line or only white space skip
continue
if arg.startswith('#'): # If line starts with comment skip
break
yield arg
def main():
prefix_char = '@'
parser = argparse.ArgumentParser(
description=(f'Create plot from a data file. Can use command line options '
f'or point to a configuration file using {prefix_char} character.'))
# Allow user to reference a file by using the @ symbol for a specific
# argument value
parser = argparse.ArgumentParser(fromfile_prefix_chars=prefix_char)
# Update the file parsing logic to skip commented lines
parser.convert_arg_line_to_args = convert_arg_line_to_args
parser.add_argument('-f', '--file_path', type=str, required=True,
help=('Required: Full path to file for creating Plot. For multiple '
'files use terminal syntax for matching multiple files. '
'For example "sgpmetE13.b1.202007*.*.nc" will match all files '
'for the month of July in 2020. Need to use double quotes '
'to stop terminal from expanding the search, and let the '
'python program perform search.'))
out_path_default = 'image.png'
parser.add_argument('-o', '--out_path', type=str, default=out_path_default,
help=("Full path filename to use for saving image. "
"Default is '{out_path_default}'. If only a path is given "
"will use that path with image name '{out_path_default}', "
"else will use filename given."))
parser.add_argument('-fd', '--field', type=str, default=None,
help='Name of the field to plot')
parser.add_argument('-fds', '--fields', nargs='+',
type=str, default=None,
help='Name of the fields to use to plot')
parser.add_argument('-wfs', '--wind_fields', nargs='+',
type=str, default=None,
help='Wind field names used to plot')
parser.add_argument('-sfs', '--station_fields', nargs='+',
type=str, default=None,
help='Station field names to plot sites')
default = 'lat'
parser.add_argument('-lat', '--latitude', type=str, default=default,
help=f"Name of latitude variable in file. Default is '{default}'")
default = 'lon'
parser.add_argument('-lon', '--longitude', type=str, default=default,
help=f"Name of longitude variable in file. Default is '{default}'")
parser.add_argument('-xf', '--x_field', type=str, default=None,
help='Name of variable to plot on x axis')
parser.add_argument('-yf', '--y_field', type=str, default=None,
help='Name of variable to plot on y axis')
parser.add_argument('-x', type=np.array,
help='x coordinates or grid for z')
parser.add_argument('-y', type=np.array,
help='y coordinates or grid for z')
parser.add_argument('-z', type=np.array,
help='Values over which to contour')
default = 'u_wind'
parser.add_argument('-u', '--u_wind', type=str, default=default,
help=f"File variable name for u_wind wind component. Default is '{default}'")
default = 'v_wind'
parser.add_argument('-v', '--v_wind', type=str, default=default,
help=f"File variable name for v_wind wind compenent. Default is '{default}'")
default = 'pres'
parser.add_argument('-pf', '--p_field', type=str, default=default,
help=f"File variable name for pressure. Default is '{default}'")
default = 'tdry'
parser.add_argument('-tf', '--t_field', type=str, default=default,
help=f"File variable name for temperature. Default is '{default}'")
default = 'dp'
parser.add_argument('-tdf', '--td_field', type=str, default=default,
help=f"File variable name for dewpoint temperature. Default is '{default}'")
default = 'wspd'
parser.add_argument('-sf', '--spd_field', type=str, default=default,
help=f"File variable name for wind speed. Default is '{default}'")
default = 'deg'
parser.add_argument('-df', '--dir_field', type=str, default=default,
help=f"File variable name for wind direction. Default is '{default}'")
parser.add_argument('-al', '--alt_label', type=str, default=None,
help='Altitude axis label')
default = 'alt'
parser.add_argument('-af', '--alt_field', type=str, default=default,
help=f"File variable name for altitude. Default is '{default}'")
global _default_dsname
_default_dsname = 'act_datastream'
parser.add_argument('-ds', '--dsname', type=str, default=_default_dsname,
help=f"Name of datastream to plot. Default is '{_default_dsname}'")
default = '(0, )'
parser.add_argument('-si', '--subplot_index', type=ast.literal_eval,
default=default,
help=f'Index of the subplot via tuple syntax. '
f'Example for two plots is "(0,), (1,)". '
f"Default is '{default}'")
default = (1, )
parser.add_argument('-ss', '--subplot_shape', nargs='+', type=int,
default=default,
help=(f'The number of (rows, columns) '
f'for the subplots in the display. '
f'Default is {default}'))
plot_type_options = ['plot', 'qc']
parser.add_argument('-pt', '--plot_type', nargs='+', type=str,
help=f'Type of plot to make. Current options include: '
f'{plot_type_options}')
parser.add_argument('-vy', '--var_y', type=str, default=None,
help=('Set this to the name of a data variable in '
'the Dataset to use as the y-axis variable '
'instead of the default dimension.'))
parser.add_argument('-plp', '--plevels_plot',
type=np.array, default=None,
help='Pressure levels to plot the wind barbs on.')
parser.add_argument('-cbl', '--cb_label', type=str, default=None,
help='Colorbar label to use')
parser.add_argument('-st', '--set_title', type=str, default=None,
help='Title for the plot')
default = 0.08
parser.add_argument('-pb', '--plot_buffer', type=float, default=default,
help=(f'Buffer to add around data on plot in lat '
f'and lon dimension. Default is {default}'))
default = 'terrain-background'
parser.add_argument('-sm', '--stamen', type=str, default=default,
help=f"Dataset to use for background image. Default is '{default}'")
default = 8
parser.add_argument('-tl', '--tile', type=int, default=default,
help=f'Tile zoom to use with background image. Default is {default}')
parser.add_argument('-cfs', '--cfeatures', nargs='+', type=str, default=None,
help='Cartopy feature to add to plot')
parser.add_argument('-txt', '--text', type=json.loads, default=None,
help=('Dictionary of {text:[lon,lat]} to add to plot. '
'Can have more than one set of text to add.'))
default = 'rainbow'
parser.add_argument('-cm', '--cmap', default=default,
help=f"colormap to use. Defaut is '{default}'")
parser.add_argument('-abl', '--abs_limits', nargs='+', type=float,
default=(None, None),
help=('Sets the bounds on plot limits even if data '
'values exceed those limits. Y axis limits. Default is no limits.'))
parser.add_argument('-tr', '--time_rng', nargs='+', type=float, default=None,
help=('List or tuple with (min,max) values to set the '
'x-axis range limits'))
default = 20
parser.add_argument('-nd', '--num_dir', type=int, default=default,
help=(f'Number of directions to split the wind rose into. '
f'Default is {default}'))
parser.add_argument('-sb', '--spd_bins', nargs='+', type=float, default=None,
help='Bin boundaries to sort the wind speeds into')
default = 3
parser.add_argument('-ti', '--tick_interval', type=int, default=default,
help=(f'Interval (in percentage) for the ticks '
f'on the radial axis. Default is {default}'))
parser.add_argument('-ac', '--assessment_color', type=json.loads,
default=None,
help=('dictionary lookup to override default '
'assessment to color'))
default = False
parser.add_argument('-ao', '--assessment_overplot',
default=default, action='store_true',
help=(f'Option to overplot quality control colored '
f'symbols over plotted data using '
f'flag_assessment categories. Default is {default}'))
default = {'Incorrect': ['Bad', 'Incorrect'],
'Suspect': ['Indeterminate', 'Suspect']}
parser.add_argument('-oc', '--overplot_category', type=json.loads, default=default,
help=(f'Look up to categorize assessments into groups. '
f'This allows using multiple terms for the same '
f'quality control level of failure. '
f'Also allows adding more to the defaults. Default is {default}'))
default = {'Incorrect': 'red', 'Suspect': 'orange'}
parser.add_argument('-co', '--category_color', type=json.loads,
default=default,
help=(f'Lookup to match overplot category color to '
f'assessment grouping. Default is {default}'))
parser.add_argument('-flp', '--force_line_plot', default=False,
action='store_true',
help='Option to plot 2D data as 1D line plots')
parser.add_argument('-l', '--labels', nargs='+', default=False,
type=str,
help=('Option to overwrite the legend labels. '
'Must have same dimensions as number of '
'lines plotted.'))
parser.add_argument('-sy', '--secondary_y', default=False, action='store_true',
help='Option to plot on secondary y axis')
if CARTOPY_AVAILABLE:
default = ccrs.PlateCarree()
parser.add_argument('-prj', '--projection', type=str,
default=default,
help=f"Projection to use on plot. Default is {default}")
default = 20
parser.add_argument('-bx', '--num_barb_x', type=int, default=default,
help=f'Number of wind barbs to plot in the x axis. Default is {default}')
default = 20
parser.add_argument('-by', '--num_barb_y', type=int, default=default,
help=f"Number of wind barbs to plot in the y axis. Default is {default}")
default = 20
parser.add_argument('-tp', '--num_time_periods', type=int, default=default,
help=f'Set how many time periods. Default is {default}')
parser.add_argument('-bn', '--bins', nargs='+', type=int, default=None,
help='histogram bin boundaries to use')
parser.add_argument('-bf', '--bin_field', type=str, default=None,
help=('name of the field that stores the '
'bins for the spectra'))
parser.add_argument('-xb', '--x_bins', nargs='+', type=int, default=None,
help='Histogram bin boundaries to use for x axis variable')
parser.add_argument('-yb', '--y_bins', nargs='+', type=int, default=None,
help='Histogram bin boundaries to use for y axis variable')
parser.add_argument('-t', '--time', type=str, default=None,
help='Time period to be plotted')
parser.add_argument('-sbf', '--sortby_field', type=str, default=None,
help='Sort histograms by a given field parameter')
parser.add_argument('-sbb', '--sortby_bins', nargs='+', type=int,
default=None,
help='Bins to sort the histograms by')
default = 20
parser.add_argument('-nyl', '--num_y_levels', type=int, default=default,
help=f'Number of levels in the y axis to use. Default is {default}')
parser.add_argument('-sk', '--sel_kwargs', type=json.loads, default=None,
help=('The keyword arguments to pass into '
':py:func:`xarray.DataArray.sel`'))
parser.add_argument('-ik', '--isel_kwargs', type=json.loads, default=None,
help=('The keyword arguments to pass into '
':py:func:`xarray.DataArray.isel`'))
default = 'cubic'
parser.add_argument('-fn', '--function', type=str, default=default,
help=(f'Defaults to cubic function for interpolation. '
f'See scipy.interpolate.Rbf for additional options. '
f'Default is {default}'))
default = 0.1
parser.add_argument('-gb', '--grid_buffer', type=float, default=default,
help=f'Buffer to apply to grid. Default is {default}')
default = (0.01, 0.01)
parser.add_argument('-gd', '--grid_delta', nargs='+',
type=float, default=default,
help=f'X and Y deltas for creating grid. Default is {default}')
parser.add_argument('-fg', '--figsize', nargs='+', type=float,
default=None,
help='Width and height in inches of figure')
default = 'white'
parser.add_argument('-tc', '--text_color', type=str, default=default,
help=f"Color of text. Default is '{default}'")
parser.add_argument('-kwargs', type=json.loads, default=dict(),
help='keyword arguments to use in plotting function')
parser.add_argument('-pk', '--plot_kwargs', type=json.loads, default=dict(),
help=("Additional keyword arguments to pass "
"into MetPy's SkewT.plot"))
parser.add_argument('-pbk', '--plot_barbs_kwargs', type=json.loads,
default=dict(),
help=("Additional keyword arguments to pass "
"into MetPy's SkewT.plot_barbs"))
default = True
parser.add_argument('-cu', '--cleanup', default=default, action='store_false',
help=f'Turn off standard methods for obj cleanup. Default is {default}')
parser.add_argument('-gl', '--gridlines', default=False, action='store_true',
help='Use latitude and longitude gridlines.')
parser.add_argument('-cl', '--coastlines', default=False, action='store_true',
help='Plot coastlines on geographical map')
parser.add_argument('-bg', '--background', default=False, action='store_true',
help='Plot a stock image background')
parser.add_argument('-nan', '--add_nan', default=False, action='store_true',
help='Fill in data gaps with NaNs')
parser.add_argument('-dn', '--day_night', default=False, action='store_true',
help=("Fill in color coded background according "
"to time of day."))
parser.add_argument('-yr', '--set_yrange', default=None, nargs=2,
help=("Set the yrange for the specific plot"))
parser.add_argument('-iya', '--invert_y_axis', default=False,
action='store_true',
help='Invert y axis')
parser.add_argument('-sp', '--show_parcel', default=False, action='store_true',
help='set to true to plot the parcel path.')
parser.add_argument('-cape', '--shade_cape', default=False,
action='store_true',
help='set to true to shade regions of cape.')
parser.add_argument('-cin', '--shade_cin', default=False, action='store_true',
help='set to true to shade regions of cin.')
parser.add_argument('-d', '--density', default=False, action='store_true',
help='Plot a p.d.f. instead of a frequency histogram')
parser.add_argument('-m', '--mesh', default=False, action='store_true',
help=('Set to True to interpolate u and v to '
'grid and create wind barbs'))
parser.add_argument('-uv', '--from_u_and_v', default=False, action='store_true',
help='Create SkewTPLot with u and v wind')
parser.add_argument('-sd', '--from_spd_and_dir', default=False, action='store_true',
help='Create SkewTPlot with wind speed and direction')
parser.add_argument('-px', '--plot_xsection', default=False, action='store_true',
help='Plots a cross section using the specified x and y fields')
parser.add_argument('-pxm', '--xsection_map', default=False, action='store_true',
help='plots a cross section of 2D data on a geographical map')
parser.add_argument('-p', '--plot', default=False, action='store_true',
help='Makes a time series plot')
parser.add_argument('-mp', '--multi_panel', default=False,
action='store_true',
help='Makes a 2 panel timeseries plot')
parser.add_argument('-qc', '--qc', default=False, action='store_true',
help='Create time series plot of embedded quality control values')
parser.add_argument('-fb', '--fill_between', default=False, action='store_true',
help='Makes a fill between plot based on matplotlib')
parser.add_argument('-bsd', '--barbs_spd_dir', default=False, action='store_true',
help=('Makes time series plot of wind barbs '
'using wind speed and dir.'))
parser.add_argument('-buv', '--barbs_u_v', default=False, action='store_true',
help=('Makes time series plot of wind barbs '
'using u and v wind components.'))
parser.add_argument('-pxs', '--xsection_from_1d', default=False,
action='store_true',
help='Will plot a time-height cross section from 1D dataset')
parser.add_argument('-ths', '--time_height_scatter',
default=False, action='store_true',
help='Create a scatter time series plot')
parser.add_argument('-sbg', '--stacked_bar_graph',
default=False, action='store_true',
help='Create stacked bar graph histogram')
parser.add_argument('-psd', '--size_dist', default=False, action='store_true',
help='Plots a stairstep plot of size distribution')
parser.add_argument('-sg', '--stairstep', default=False, action='store_true',
help='Plots stairstep plot of a histogram')
parser.add_argument('-hm', '--heatmap', default=False, action='store_true',
help='Plot a heatmap histogram from 2 variables')
parser.add_argument('-cc', '--create_contour', default=False, action='store_true',
help='Extracts, grids, and creates a contour plot')
parser.add_argument('-cf', '--contourf', default=False, action='store_true',
help=('Base function for filled contours if user '
'already has data gridded'))
parser.add_argument('-ct', '--plot_contour', default=False, action='store_true',
help=('Base function for contours if user '
'already has data gridded'))
parser.add_argument('-vsd', '--vectors_spd_dir', default=False, action='store_true',
help='Extracts, grids, and creates a contour plot.')
parser.add_argument('-b', '--barbs', default=False, action='store_true',
help='Base function for wind barbs.')
parser.add_argument('-ps', '--plot_station', default=False, action='store_true',
help='Extracts, grids, and creates a contour plot')
# The mutually exclusive but one required group
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('-gp', '--geodisplay', dest='action', action='store_const',
const=geodisplay, help='Set to generate a geographic plot')
group.add_argument('-skt', '--skewt', dest='action', action='store_const',
const=skewt, help='Set to generate a skew-t plot')
group.add_argument('-xs', '--xsection', dest='action', action='store_const',
const=xsection, help='Set to generate a XSection plot')
group.add_argument('-wr', '--wind_rose', dest='action', action='store_const',
const=wind_rose, help='Set to generate a wind rose plot')
group.add_argument('-ts', '--timeseries', dest='action', action='store_const',
const=timeseries, help='Set to generate a timeseries plot')
group.add_argument('-c', '--contour', dest='action', action='store_const',
const=contour, help='Set to generate a contour plot')
group.add_argument('-hs', '--histogram', dest='action', action='store_const',
const=histogram, help='Set to generate a histogram plot')
args = parser.parse_args()
# Check if a path but no file name is given. If so use a default name.
out_path = pathlib.Path(args.out_path)
if out_path.is_dir():
args.out_path = str(pathlib.Path(out_path, out_path_default))
args.action(args)
if __name__ == '__main__':
main()
<|code_end|>
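For reference, a hypothetical invocation of the script above, driven from Python so the example stays in one language; the file name, field name, and output path are placeholders, not values from this repository, while the flags map to the argparse options defined in scripts/ads.py:

```
import subprocess

# Hypothetical example: make a simple time series plot of one field.
subprocess.run(
    [
        'python', 'scripts/ads.py',
        '-f', 'sgpmetE13.b1.20200701.000000.nc',  # --file_path (placeholder file)
        '-ts',                                    # timeseries display
        '-p',                                     # simple plot action
        '-fd', 'temp_mean',                       # --field (placeholder variable)
        '-o', 'temp_mean.png',                    # --out_path
    ],
    check=True,
)
```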
setup.py
<|code_start|>import glob
from os import path
from setuptools import setup, find_packages
import sys
import versioneer
# NOTE: This file must remain Python 2 compatible for the foreseeable future,
# to ensure that we error out properly for people with outdated setuptools
# and/or pip.
min_version = (3, 6)
if sys.version_info < min_version:
error = """
act does not support Python {}.{}.
Python {}.{} and above is required. Check your Python version like so:
python3 --version
This may be due to an out-of-date pip. Make sure you have pip >= 9.0.1.
Upgrade pip like so:
pip install --upgrade pip
""".format(
*sys.version_info[:2], *min_version
)
sys.exit(error)
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.rst'), encoding='utf-8') as readme_file:
readme = readme_file.read()
with open(path.join(here, 'requirements.txt')) as requirements_file:
# Parse requirements.txt, ignoring any commented-out lines.
requirements = [
line for line in requirements_file.read().splitlines() if not line.startswith('#')
]
setup(
name='act-atmos',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
description='Package for working with atmospheric time series datasets',
long_description=readme,
long_description_content_type='text/x-rst',
author='Adam Theisen',
author_email='[email protected]',
url='https://github.com/ARM-DOE/ACT',
packages=find_packages(exclude=['docs']),
entry_points={'console_scripts': []},
include_package_data=True,
package_data={'act': []},
scripts=glob.glob("scripts/*"),
install_requires=requirements,
license='BSD (3-clause)',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Natural Language :: English',
'Programming Language :: Python :: 3',
],
)
<|code_end|>
|
Valid Min in act.utils.decode_present_weather
* ACT version: 1.3.4
* Python version:3.9.15
* Operating System: Windows 10
### Description
I am receiving an error for "del data.attrs['valid_min']" when reading in data and trying to decode present weather data.
### What I Did
```
act.utils.decode_present_weather(ds_fd70,'present_wx2',precip_codes)
decode_present_weather
del data.attrs['valid_min']
KeyError: 'valid_min'
When outputting time I have:
ds_fd70.time
array(['2023-03-09T16:45:00.000000000', '2023-03-09T17:00:00.000000000',
'2023-03-09T17:15:00.000000000', '2023-03-09T17:30:00.000000000',
'2023-03-09T17:45:00.000000000', '2023-03-09T18:00:00.000000000',
'2023-03-09T18:15:00.000000000', '2023-03-09T18:30:00.000000000',
'2023-03-09T18:45:00.000000000', '2023-03-09T19:00:00.000000000',
'2023-03-09T19:15:00.000000000', '2023-03-09T19:30:00.000000000',
```
|
act/utils/inst_utils.py
<|code_start|>"""
Functions containing utilities for instruments.
"""
def decode_present_weather(ds, variable=None, decoded_name=None):
"""
This function decodes codes reported from automatic weather stations such as the PWD22.
This is based on WMO Table 4680.
Parameters
----------
ds : xarray.Dataset
ACT or Xarray dataset from which to convert codes
variable : string
Variable to decode
decoded_name : string
New variable name to store updated labels
Returns
-------
ds : xarray.Dataset
Returns dataset with new decoded data
References
----------
WMO Manual on Code Volume I.1
https://www.wmo.int/pages/prog/www/WMOCodes/WMO306_vI1/Publications/2017update/Sel9.pdf
"""
# Check to ensure that a variable name is passed
if variable is None:
raise ValueError('You must specify a variable')
if variable not in ds:
raise ValueError('Variable not in the dataset')
# Define the weather hash
weather = {
0: 'No significant weather observed',
1: 'Clouds generally dissolving or becoming less developed during the past hour',
2: 'State of the sky on the whole unchanged during the past hour',
3: 'Clouds generally forming or developing during the past hour',
4: 'Haze or smoke, or dust in suspension in the air, visibility >= 1 km',
5: 'Haze or smoke, or dust in suspension in the air, visibility < 1 km',
10: 'Mist',
11: 'Diamond dust',
12: 'Distant lightning',
18: 'Squalls',
20: 'Fog',
21: 'Precipitation',
22: 'Drizzle (not freezing) or snow grains',
23: 'Rain (not freezing)',
24: 'Snow',
25: 'Freezing drizzle or freezing rain',
26: 'Thunderstorm (with or without precipitation)',
27: 'Blowing or drifting snow or sand',
28: 'Blowing or drifting snow or sand, visibility >= 1 km',
29: 'Blowing or drifting snow or sand, visibility < 1 km',
30: 'Fog',
31: 'Fog or ice fog in patches',
32: 'Fog or ice fog, has become thinner during the past hour',
33: 'Fog or ice fog, no appreciable change during the past hour',
34: 'Fog or ice fog, has begun or become thicker during the past hour',
35: 'Fog, depositing rime',
40: 'Precipitation',
41: 'Precipitation, slight or moderate',
42: 'Precipitation, heavy',
43: 'Liquid precipitation, slight or moderate',
44: 'Liquid precipitation, heavy',
45: 'Solid precipitation, slight or moderate',
46: 'Solid precipitation, heavy',
47: 'Freezing precipitation, slight or moderate',
48: 'Freezing precipitation, heavy',
50: 'Drizzle',
51: 'Drizzle, not freezing, slight',
52: 'Drizzle, not freezing, moderate',
53: 'Drizzle, not freezing, heavy',
54: 'Drizzle, freezing, slight',
55: 'Drizzle, freezing, moderate',
56: 'Drizzle, freezing, heavy',
57: 'Drizzle and rain, slight',
58: 'Drizzle and rain, moderate or heavy',
60: 'Rain',
61: 'Rain, not freezing, slight',
62: 'Rain, not freezing, moderate',
63: 'Rain, not freezing, heavy',
64: 'Rain, freezing, slight',
65: 'Rain, freezing, moderate',
66: 'Rain, freezing, heavy',
67: 'Rain (or drizzle) and snow, slight',
68: 'Rain (or drizzle) and snow, moderate or heavy',
70: 'Snow',
71: 'Snow, light',
72: 'Snow, moderate',
73: 'Snow, heavy',
74: 'Ice pellets, slight',
75: 'Ice pellets, moderate',
76: 'Ice pellets, heavy',
77: 'Snow grains',
78: 'Ice crystals',
80: 'Shower(s) or Intermittent Precipitation',
81: 'Rain shower(s) or intermittent rain, slight',
82: 'Rain shower(s) or intermittent rain, moderate',
83: 'Rain shower(s) or intermittent rain, heavy',
84: 'Rain shower(s) or intermittent rain, violent',
85: 'Snow shower(s) or intermittent snow, slight',
86: 'Snow shower(s) or intermittent snow, moderate',
87: 'Snow shower(s) or intermittent snow, heavy',
89: 'Hail',
90: 'Thunderstorm',
91: 'Thunderstorm, slight or moderate, with no precipitation',
92: 'Thunderstorm, slight or moderate, with rain showers and/or snow showers',
93: 'Thunderstorm, slight or moderate, with hail',
94: 'Thunderstorm, heavy, with no precipitation',
95: 'Thunderstorm, heavy, with rain showers and/or snow showers',
96: 'Thunderstorm, heavy, with hail',
99: 'Tornado',
-9999: 'Missing',
}
# If a decoded name is not passed, make one
if decoded_name is None:
decoded_name = variable + '_decoded'
# Get data and fill nans with -9999
data = ds[variable]
data = data.fillna(-9999)
# Get the weather type for each code
wx_type = [weather[d] for d in data.values]
# Massage the data array to set back in the dataset
data.values = wx_type
data.attrs['long_name'] = data.attrs['long_name'] + ' Decoded'
del data.attrs['valid_min']
del data.attrs['valid_max']
ds[decoded_name] = data
return ds
<|code_end|>
|
act/utils/inst_utils.py
<|code_start|>"""
Functions containing utilities for instruments.
"""
def decode_present_weather(ds, variable=None, decoded_name=None):
"""
This function decodes codes reported from automatic weather stations such as the PWD22.
This is based on WMO Table 4680.
Parameters
----------
ds : xarray.Dataset
ACT or Xarray dataset from which to convert codes
variable : string
Variable to decode
decoded_name : string
New variable name to store updated labels
Returns
-------
ds : xarray.Dataset
Returns dataset with new decoded data
References
----------
WMO Manual on Code Volume I.1
https://www.wmo.int/pages/prog/www/WMOCodes/WMO306_vI1/Publications/2017update/Sel9.pdf
"""
# Check to ensure that a variable name is passed
if variable is None:
raise ValueError('You must specify a variable')
if variable not in ds:
raise ValueError('Variable not in the dataset')
# Define the weather hash
weather = {
0: 'No significant weather observed',
1: 'Clouds generally dissolving or becoming less developed during the past hour',
2: 'State of the sky on the whole unchanged during the past hour',
3: 'Clouds generally forming or developing during the past hour',
4: 'Haze or smoke, or dust in suspension in the air, visibility >= 1 km',
5: 'Haze or smoke, or dust in suspension in the air, visibility < 1 km',
10: 'Mist',
11: 'Diamond dust',
12: 'Distant lightning',
18: 'Squalls',
20: 'Fog',
21: 'Precipitation',
22: 'Drizzle (not freezing) or snow grains',
23: 'Rain (not freezing)',
24: 'Snow',
25: 'Freezing drizzle or freezing rain',
26: 'Thunderstorm (with or without precipitation)',
27: 'Blowing or drifting snow or sand',
28: 'Blowing or drifting snow or sand, visibility >= 1 km',
29: 'Blowing or drifting snow or sand, visibility < 1 km',
30: 'Fog',
31: 'Fog or ice fog in patches',
32: 'Fog or ice fog, has become thinner during the past hour',
33: 'Fog or ice fog, no appreciable change during the past hour',
34: 'Fog or ice fog, has begun or become thicker during the past hour',
35: 'Fog, depositing rime',
40: 'Precipitation',
41: 'Precipitation, slight or moderate',
42: 'Precipitation, heavy',
43: 'Liquid precipitation, slight or moderate',
44: 'Liquid precipitation, heavy',
45: 'Solid precipitation, slight or moderate',
46: 'Solid precipitation, heavy',
47: 'Freezing precipitation, slight or moderate',
48: 'Freezing precipitation, heavy',
50: 'Drizzle',
51: 'Drizzle, not freezing, slight',
52: 'Drizzle, not freezing, moderate',
53: 'Drizzle, not freezing, heavy',
54: 'Drizzle, freezing, slight',
55: 'Drizzle, freezing, moderate',
56: 'Drizzle, freezing, heavy',
57: 'Drizzle and rain, slight',
58: 'Drizzle and rain, moderate or heavy',
60: 'Rain',
61: 'Rain, not freezing, slight',
62: 'Rain, not freezing, moderate',
63: 'Rain, not freezing, heavy',
64: 'Rain, freezing, slight',
65: 'Rain, freezing, moderate',
66: 'Rain, freezing, heavy',
67: 'Rain (or drizzle) and snow, slight',
68: 'Rain (or drizzle) and snow, moderate or heavy',
70: 'Snow',
71: 'Snow, light',
72: 'Snow, moderate',
73: 'Snow, heavy',
74: 'Ice pellets, slight',
75: 'Ice pellets, moderate',
76: 'Ice pellets, heavy',
77: 'Snow grains',
78: 'Ice crystals',
80: 'Shower(s) or Intermittent Precipitation',
81: 'Rain shower(s) or intermittent rain, slight',
82: 'Rain shower(s) or intermittent rain, moderate',
83: 'Rain shower(s) or intermittent rain, heavy',
84: 'Rain shower(s) or intermittent rain, violent',
85: 'Snow shower(s) or intermittent snow, slight',
86: 'Snow shower(s) or intermittent snow, moderate',
87: 'Snow shower(s) or intermittent snow, heavy',
89: 'Hail',
90: 'Thunderstorm',
91: 'Thunderstorm, slight or moderate, with no precipitation',
92: 'Thunderstorm, slight or moderate, with rain showers and/or snow showers',
93: 'Thunderstorm, slight or moderate, with hail',
94: 'Thunderstorm, heavy, with no precipitation',
95: 'Thunderstorm, heavy, with rain showers and/or snow showers',
96: 'Thunderstorm, heavy, with hail',
99: 'Tornado',
-9999: 'Missing',
}
# If a decoded name is not passed, make one
if decoded_name is None:
decoded_name = variable + '_decoded'
# Get data and fill nans with -9999
data = ds[variable]
data = data.fillna(-9999)
# Get the weather type for each code
wx_type = [weather[d] for d in data.values]
# Massage the data array to set back in the dataset
data.values = wx_type
if 'long_name' in data.attrs:
data.attrs['long_name'] = data.attrs['long_name'] + ' Decoded'
else:
data.attrs['long_name'] = 'Decoded present weather values'
if 'valid_min' in data.attrs:
del data.attrs['valid_min']
if 'valid_max' in data.attrs:
del data.attrs['valid_max']
ds[decoded_name] = data
return ds
<|code_end|>
|
Attempting to set identical low and high xlims
* ACT version: 1.4.0
* Python version: 3.8.13
* Operating System: Linux
### Description
When processing in an automated queue we sometimes process data that contains only a few time steps. The process does not fail but it does produce a warning.
/.conda/envs/dqo-base/lib/python3.8/site-packages/act/plotting/timeseriesdisplay.py:214: UserWarning: Attempting to set identical low and high xlims makes transformation singular; automatically expanding.
I want to add a check when setting xlim values to ensure the start and end are not the same. If they are, automatically expand the xlim to avoid the warning.
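A minimal sketch of the kind of guard described above, not the actual patch: when both x limits are equal, pad the upper bound before handing the range to matplotlib, similar to what `set_yrng` already does for identical y limits. The helper name `_pad_identical_xlims`, the one-day pad for datetime axes, and the assumption that limits are numpy `datetime64` values or plain numbers are illustrative choices, not part of the library.

```python
import numpy as np

def _pad_identical_xlims(xrng):
    """Return xrng as a list with the upper bound nudged when both limits match."""
    xrng = list(xrng)
    if xrng[0] == xrng[1]:
        if np.issubdtype(np.asarray(xrng[0]).dtype, np.datetime64):
            # Assumed pad of one day for datetime64 axes so the span is finite.
            xrng[1] = xrng[1] + np.timedelta64(1, 'D')
        else:
            # Assumed pad of one unit for plain numeric axes.
            xrng[1] = xrng[1] + 1
    return xrng
```

`set_xrng` could run its input through a check like this before calling `set_xlim`, which would avoid the identical-xlims warning for files with a single time step.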
|
act/plotting/act_cmap.py
<|code_start|>"""
Available colormaps (reversed versions also provided), these
colormaps are available within matplotlib with names 'act_COLORMAP':
* HomeyerRainbow
"""
import matplotlib as mpl
import matplotlib.cm
import matplotlib.colors as colors
from ._act_cmap import datad, yuv_rainbow_24
def _reverser(f):
"""perform reversal."""
def freversed(x):
"""f specific reverser."""
return f(1 - x)
return freversed
def revcmap(data):
"""Can only handle specification *data* in dictionary format."""
data_r = {}
for key, val in data.items():
if callable(val):
valnew = _reverser(val)
# This doesn't work: lambda x: val(1-x)
# The same "val" (the first one) is used
# each time, so the colors are identical
# and the result is shades of gray.
else:
# Flip x and exchange the y values facing x = 0 and x = 1.
valnew = [(1.0 - x, y1, y0) for x, y0, y1 in reversed(val)]
data_r[key] = valnew
return data_r
def _reverse_cmap_spec(spec):
"""Reverses cmap specification *spec*, can handle both dict and tuple
type specs."""
if isinstance(spec, dict) and 'red' in spec.keys():
return revcmap(spec)
else:
revspec = list(reversed(spec))
if len(revspec[0]) == 2: # e.g., (1, (1.0, 0.0, 1.0))
revspec = [(1.0 - a, b) for a, b in revspec]
return revspec
def _generate_cmap(name, lutsize):
"""Generates the requested cmap from it's name *name*. The lut size is
*lutsize*."""
spec = datad[name]
# Generate the colormap object.
if isinstance(spec, dict) and 'red' in spec.keys():
return colors.LinearSegmentedColormap(name, spec, lutsize)
else:
return colors.LinearSegmentedColormap.from_list(name, spec, lutsize)
cmap_d = dict()
LUTSIZE = mpl.rcParams['image.lut']
# need this list because datad is changed in loop
_cmapnames = list(datad.keys())
# Generate the reversed specifications ...
for cmapname in _cmapnames:
spec = datad[cmapname]
spec_reversed = _reverse_cmap_spec(spec)
datad[cmapname + '_r'] = spec_reversed
# Precache the cmaps with ``lutsize = LUTSIZE`` ...
# Use datad.keys() to also add the reversed ones added in the section above:
for cmapname in datad.keys():
cmap_d[cmapname] = _generate_cmap(cmapname, LUTSIZE)
locals().update(cmap_d)
# register the colormaps so that they can be accessed with the names act_XXX
for name, cmap in cmap_d.items():
full_name = 'act_' + name
mpl.cm.register_cmap(name=full_name, cmap=cmap)
<|code_end|>
act/plotting/timeseriesdisplay.py
<|code_start|>"""
Stores the class for TimeSeriesDisplay.
"""
import datetime as dt
import textwrap
import warnings
from copy import deepcopy
from re import search, search as re_search
import matplotlib as mpl
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from matplotlib import colors as mplcolors
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scipy.interpolate import NearestNDInterpolator
from ..qc.qcfilter import parse_bit
from ..utils import data_utils, datetime_utils as dt_utils
from ..utils.datetime_utils import determine_time_delta, reduce_time_ranges
from ..utils.geo_utils import get_sunrise_sunset_noon
from . import common
from .plot import Display
class TimeSeriesDisplay(Display):
"""
This subclass contains routines that are specific to plotting
time series plots from data. It is inherited from Display and therefore
contains all of Display's attributes and methods.
Examples
--------
To create a TimeSeriesDisplay with 3 rows, simply do:
.. code-block:: python
ds = act.read_netcdf(the_file)
disp = act.plotting.TimeSeriesDisplay(ds, subplot_shape=(3,), figsize=(15, 5))
The TimeSeriesDisplay constructor takes in the same keyword arguments as
plt.subplots. For more information on the plt.subplots keyword arguments,
see the `matplotlib documentation
<https://matplotlib.org/api/_as_gen/matplotlib.pyplot.subplots.html>`_.
If no subplot_shape is provided, then no figure or axis will be created
until add_subplots or plots is called.
"""
def __init__(self, ds, subplot_shape=(1,), ds_name=None, **kwargs):
super().__init__(ds, subplot_shape, ds_name, **kwargs)
def day_night_background(self, dsname=None, subplot_index=(0,)):
"""
Colorcodes the background according to sunrise/sunset.
Parameters
----------
dsname : None or str
If there is more than one datastream in the display object the
name of the datastream needs to be specified. If set to None and
there is only one datastream then ACT will use the sole datastream
in the object.
subplot_index : 1 or 2D tuple, list, or array
The index to the subplot to place the day and night background in.
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream to derive the '
+ 'information needed for the day and night '
+ 'background when 2 or more datasets are in '
+ 'the display object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
# Get File Dates
try:
file_dates = self._ds[dsname].attrs['_file_dates']
except KeyError:
file_dates = []
if len(file_dates) == 0:
sdate = dt_utils.numpy_to_arm_date(self._ds[dsname].time.values[0])
edate = dt_utils.numpy_to_arm_date(self._ds[dsname].time.values[-1])
file_dates = [sdate, edate]
all_dates = dt_utils.dates_between(file_dates[0], file_dates[-1])
if self.axes is None:
raise RuntimeError('day_night_background requires the plot to ' 'be displayed.')
ax = self.axes[subplot_index]
# Find variable names for latitude and longitude
variables = list(self._ds[dsname].data_vars)
lat_name = [var for var in ['lat', 'latitude'] if var in variables]
lon_name = [var for var in ['lon', 'longitude'] if var in variables]
if len(lat_name) == 0:
lat_name = None
else:
lat_name = lat_name[0]
if len(lon_name) == 0:
lon_name = None
else:
lon_name = lon_name[0]
# Variable name does not match, look for standard_name declaration
if lat_name is None or lon_name is None:
for var in variables:
try:
if self._ds[dsname][var].attrs['standard_name'] == 'latitude':
lat_name = var
except KeyError:
pass
try:
if self._ds[dsname][var].attrs['standard_name'] == 'longitude':
lon_name = var
except KeyError:
pass
if lat_name is not None and lon_name is not None:
break
if lat_name is None or lon_name is None:
return
# Extract latitude and longitude scalar from variable. If variable is a vector look
# for first non-Nan value.
lat_lon_list = [np.nan, np.nan]
for ii, var_name in enumerate([lat_name, lon_name]):
try:
values = self._ds[dsname][var_name].values
if values.size == 1:
lat_lon_list[ii] = float(values)
else:
# Look for non-NaN values to use for latitude location. If not found use first value.
index = np.where(np.isfinite(values))[0]
if index.size == 0:
lat_lon_list[ii] = float(values[0])
else:
lat_lon_list[ii] = float(values[index[0]])
except AttributeError:
pass
for value, name in zip(lat_lon_list, ['Latitude', 'Longitude']):
if not np.isfinite(value):
warnings.warn(f"{name} value in dataset equal to '{value}' is not finite. ", RuntimeWarning)
return
lat = lat_lon_list[0]
lon = lat_lon_list[1]
lat_range = [-90, 90]
if not (lat_range[0] <= lat <= lat_range[1]):
warnings.warn(
f"Latitude value in dataset of '{lat}' not within acceptable "
f'range of {lat_range[0]} <= latitude <= {lat_range[1]}. ',
RuntimeWarning,
)
return
lon_range = [-180, 180]
if not (lon_range[0] <= lon <= lon_range[1]):
warnings.warn(
f"Longitude value in dataset of '{lon}' not within acceptable "
f'range of {lon_range[0]} <= longitude <= {lon_range[1]}. ',
RuntimeWarning,
)
return
# Initialize the plot to a gray background for total darkness
rect = ax.patch
rect.set_facecolor('0.85')
# Get date ranges to plot
plot_dates = []
for f in all_dates:
for ii in [-1, 0, 1]:
plot_dates.append(f + dt.timedelta(days=ii))
# Get sunrise, sunset and noon times
sunrise, sunset, noon = get_sunrise_sunset_noon(lat, lon, plot_dates)
# Plot daylight
for ii in range(0, len(sunrise)):
ax.axvspan(sunrise[ii], sunset[ii], facecolor='#FFFFCC', zorder=0)
# Plot noon line
for ii in noon:
ax.axvline(x=ii, linestyle='--', color='y', zorder=1)
def set_xrng(self, xrng, subplot_index=(0,)):
"""
Sets the x range of the plot.
Parameters
----------
xrng : 2 number array
The x limits of the plot.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
"""
if self.axes is None:
raise RuntimeError('set_xrng requires the plot to be displayed.')
self.axes[subplot_index].set_xlim(xrng)
# Make sure that the xrng value is a numpy array not pandas
if isinstance(xrng[0], pd.Timestamp):
xrng = [x.to_numpy() for x in xrng if isinstance(x, pd.Timestamp)]
# Make sure that the xrng value is a numpy array not datetime.datetime
if isinstance(xrng[0], dt.datetime):
xrng = [np.datetime64(x) for x in xrng if isinstance(x, dt.datetime)]
if len(subplot_index) < 2:
self.xrng[subplot_index, 0] = xrng[0].astype('datetime64[D]').astype(float)
self.xrng[subplot_index, 1] = xrng[1].astype('datetime64[D]').astype(float)
else:
self.xrng[subplot_index][0] = xrng[0].astype('datetime64[D]').astype(float)
self.xrng[subplot_index][1] = xrng[1].astype('datetime64[D]').astype(float)
def set_yrng(self, yrng, subplot_index=(0,)):
"""
Sets the y range of the plot.
Parameters
----------
yrng : 2 number array
The y limits of the plot.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
"""
if self.axes is None:
raise RuntimeError('set_yrng requires the plot to be displayed.')
if not hasattr(self, 'yrng') and len(self.axes.shape) == 2:
self.yrng = np.zeros((self.axes.shape[0], self.axes.shape[1], 2))
elif not hasattr(self, 'yrng') and len(self.axes.shape) == 1:
self.yrng = np.zeros((self.axes.shape[0], 2))
if yrng[0] == yrng[1]:
yrng[1] = yrng[1] + 1
self.axes[subplot_index].set_ylim(yrng)
try:
self.yrng[subplot_index, :] = yrng
except IndexError:
self.yrng[subplot_index] = yrng
def plot(
self,
field,
dsname=None,
subplot_index=(0,),
cmap=None,
set_title=None,
add_nan=False,
day_night_background=False,
invert_y_axis=False,
abs_limits=(None, None),
time_rng=None,
y_rng=None,
use_var_for_y=None,
set_shading='auto',
assessment_overplot=False,
overplot_marker='.',
overplot_behind=False,
overplot_markersize=6,
assessment_overplot_category={
'Incorrect': ['Bad', 'Incorrect'],
'Suspect': ['Indeterminate', 'Suspect'],
},
assessment_overplot_category_color={'Incorrect': 'red', 'Suspect': 'orange'},
force_line_plot=False,
labels=False,
cbar_label=None,
cbar_h_adjust=None,
secondary_y=False,
y_axis_flag_meanings=False,
colorbar_labels=None,
cb_friendly=False,
match_line_label_color=False,
**kwargs,
):
"""
Makes a timeseries plot. If subplots have not been added yet, an axis
will be created assuming that there is only going to be one plot.
If plotting a high data volume 2D dataset, it may take some time to plot.
In order to speed up your plot creation, please resample your data to a
lower resolution dataset.
Parameters
----------
field : str
The name of the field to plot.
dsname : None or str
If there is more than one datastream in the display object the
name of the datastream needs to be specified. If set to None and
there is only one datastream ACT will use the sole datastream
in the object.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
cmap : matplotlib colormap
The colormap to use.
set_title : str
The title for the plot.
add_nan : bool
Set to True to fill in data gaps with NaNs.
day_night_background : bool
Set to True to fill in a color coded background
according to the time of day.
abs_limits : tuple or list
Sets the bounds on plot limits even if data values exceed
those limits. Set to (ymin,ymax). Use None if only setting
minimum or maximum limit, i.e. (22., None).
time_rng : tuple or list
List or tuple with (min, max) values to set the x-axis range
limits.
y_rng : tuple or list
List or tuple with (min, max) values to set the y-axis range
use_var_for_y : str
Set this to the name of a data variable in the Dataset to use as
the y-axis variable instead of the default dimension. Useful for
instances where data has an index-based dimension instead of a
height-based dimension. If shapes of arrays do not match it will
automatically revert back to the original ydata.
set_shading : string
Option to set the matplotlib.pcolormesh shading parameter.
Default to 'auto'
assessment_overplot : boolean
Option to overplot quality control colored symbols over plotted
data using flag_assessment categories.
overplot_marker : str
Marker to use for overplot symbol.
overplot_behind : bool
Place the overplot marker behind the data point.
overplot_markersize : float or int
Size of overplot marker. If overplot_behind or force_line_plot
are set the marker size will be double overplot_markersize so
the color is visible.
assessment_overplot_category : dict
Lookup to categorize assessments into groups. This allows using
multiple terms for the same quality control level of failure.
Also allows adding more to the defaults.
assessment_overplot_category_color : dict
Lookup to match overplot category color to assessment grouping.
force_line_plot : boolean
Option to plot 2D data as 1D line plots.
labels : boolean or list
Option to overwrite the legend labels. Must have same dimensions as
number of lines plotted.
cbar_label : str
Option to overwrite default colorbar label.
cbar_h_adjust : float
Option to adjust location of colorbar horizontally. Positive values
move it to the right, negative values move it to the left.
secondary_y : boolean
Option to plot on secondary y axis.
y_axis_flag_meanings : boolean or int
When set to True and plotting state variable with flag_values and
flag_meanings attributes will replace y axis numerical values
with flag_meanings value. Set to a positive number larger than 1
to indicate the maximum word length to use. If the text is longer than the
value and contains space characters it will be split over multiple lines.
colorbar_labels : dict
A dictionary containing values for plotting a 2D array of state variables.
The dictionary uses data values as keys and a dictionary containing keys
'text' and 'color' for each data value to plot.
Example:
{0: {'text': 'Clear sky', 'color': 'white'},
1: {'text': 'Liquid', 'color': 'green'},
2: {'text': 'Ice', 'color': 'blue'},
3: {'text': 'Mixed phase', 'color': 'purple'}}
cb_friendly : boolean
Set to true if you want to use the integrated colorblind friendly
colors for green/red based on the Homeyer colormap.
match_line_label_color : boolean
Will set the y label to match the line color in the plot. This
will only work if the time series plot is a line plot.
**kwargs : keyword arguments
The keyword arguments for :func:`plt.plot` (1D timeseries) or
:func:`plt.pcolormesh` (2D timeseries).
Returns
-------
ax : matplotlib axis handle
The matplotlib axis handle of the plot.
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
if y_axis_flag_meanings:
kwargs['linestyle'] = ''
if cb_friendly:
cmap = 'act_HomeyerRainbow'
assessment_overplot_category_color['Bad'] = (0.9285714285714286, 0.7130901016453677, 0.7130901016453677)
assessment_overplot_category_color['Incorrect'] = (0.9285714285714286, 0.7130901016453677, 0.7130901016453677)
assessment_overplot_category_color['Not Failing'] = (0.0, 0.4240129715562796, 0.4240129715562796)
assessment_overplot_category_color['Acceptable'] = (0.0, 0.4240129715562796, 0.4240129715562796)
# Get data and dimensions
data = self._ds[dsname][field]
dim = list(self._ds[dsname][field].dims)
xdata = self._ds[dsname][dim[0]]
if 'units' in data.attrs:
ytitle = ''.join(['(', data.attrs['units'], ')'])
else:
ytitle = field
if cbar_label is None:
cbar_default = ytitle
if len(dim) > 1:
if use_var_for_y is None:
ydata = self._ds[dsname][dim[1]]
else:
ydata = self._ds[dsname][use_var_for_y]
ydata_dim1 = self._ds[dsname][dim[1]]
if np.shape(ydata) != np.shape(ydata_dim1):
ydata = ydata_dim1
units = ytitle
if 'units' in ydata.attrs.keys():
units = ydata.attrs['units']
ytitle = ''.join(['(', units, ')'])
else:
units = ''
ytitle = dim[1]
# Create labels if 2d as 1d
if force_line_plot is True:
if labels is True:
labels = [' '.join([str(d), units]) for d in ydata.values]
ytitle = f"({data.attrs['units']})"
ydata = None
else:
ydata = None
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
# Set up secondary y axis if requested
if secondary_y is False:
ax = self.axes[subplot_index]
else:
ax = self.axes[subplot_index].twinx()
if colorbar_labels is not None:
flag_values = list(colorbar_labels.keys())
flag_meanings = [value['text'] for key, value in colorbar_labels.items()]
cbar_colors = [value['color'] for key, value in colorbar_labels.items()]
cmap = mpl.colors.ListedColormap(cbar_colors)
for ii, flag_meaning in enumerate(flag_meanings):
if len(flag_meaning) > 20:
flag_meaning = textwrap.fill(flag_meaning, width=20)
flag_meanings[ii] = flag_meaning
else:
flag_values = None
flag_meanings = None
cbar_colors = None
if ydata is None:
# Add in nans to ensure the data does not connect the line.
if add_nan is True:
xdata, data = data_utils.add_in_nan(xdata, data)
if day_night_background is True:
self.day_night_background(subplot_index=subplot_index, dsname=dsname)
# If limiting data being plotted use masked arrays
# Need to do it this way because of autoscale() method
if abs_limits[0] is not None and abs_limits[1] is not None:
data = np.ma.masked_outside(data, abs_limits[0], abs_limits[1])
elif abs_limits[0] is not None and abs_limits[1] is None:
data = np.ma.masked_less_equal(data, abs_limits[0])
elif abs_limits[0] is None and abs_limits[1] is not None:
data = np.ma.masked_greater_equal(data, abs_limits[1])
# Plot the data
if 'marker' not in kwargs.keys():
kwargs['marker'] = '.'
lines = ax.plot(xdata, data, **kwargs)
# Check if we need to call legend method after plotting. This is only
# called when no assessment overplot is called.
add_legend = False
if 'label' in kwargs.keys():
add_legend = True
# Overplot failing data if requested
if assessment_overplot:
# If we are doing a forced line plot from 2D data we need to manage
# legend labels. Make arrays to hold labels of failing QC
# because they are not set when labels are not set.
if not isinstance(labels, list) and add_legend is False:
labels = []
lines = []
# For forced line plot need to plot QC behind point instead of
# on top of point.
zorder = None
if force_line_plot or overplot_behind:
zorder = 0
overplot_markersize *= 2.0
for assessment, categories in assessment_overplot_category.items():
flag_data = self._ds[dsname].qcfilter.get_masked_data(
field, rm_assessments=categories, return_inverse=True
)
if np.invert(flag_data.mask).any() and np.isfinite(flag_data).any():
try:
flag_data.mask = np.logical_or(data.mask, flag_data.mask)
except AttributeError:
pass
qc_ax = ax.plot(
xdata,
flag_data,
marker=overplot_marker,
linestyle='',
markersize=overplot_markersize,
color=assessment_overplot_category_color[assessment],
label=assessment,
zorder=zorder,
)
# If labels keyword is set need to add labels for calling legend
if isinstance(labels, list):
# If plotting forced_line_plot need to subset the Line2D object
# so we don't have more than one added to legend.
if len(qc_ax) > 1:
lines.extend(qc_ax[:1])
else:
lines.extend(qc_ax)
labels.append(assessment)
add_legend = True
# Add legend if labels are available
if isinstance(labels, list):
ax.legend(lines, labels)
elif add_legend:
ax.legend()
# Change y axis to text from flag_meanings if requested.
if y_axis_flag_meanings:
flag_meanings = self._ds[dsname][field].attrs['flag_meanings']
flag_values = self._ds[dsname][field].attrs['flag_values']
# If keyword is larger than 1 assume this is the maximum character length
# desired and insert returns to wrap text.
if y_axis_flag_meanings > 1:
for ii, flag_meaning in enumerate(flag_meanings):
if len(flag_meaning) > y_axis_flag_meanings:
flag_meaning = textwrap.fill(flag_meaning, width=y_axis_flag_meanings)
flag_meanings[ii] = flag_meaning
ax.set_yticks(flag_values)
ax.set_yticklabels(flag_meanings)
else:
# Add in nans to ensure the data are not streaking
if add_nan is True:
xdata, data = data_utils.add_in_nan(xdata, data)
# Sets shading parameter to auto. Matplotlib will check dimensions.
# If X, Y and C have the same dimensions shading is set to nearest.
# If X and Y dimensions are 1 greater than C shading is set to flat.
if 'edgecolors' not in kwargs.keys():
kwargs['edgecolors'] = 'face'
mesh = ax.pcolormesh(
np.asarray(xdata),
ydata,
data.transpose(),
shading=set_shading,
cmap=cmap,
**kwargs,
)
# Set Title
if set_title is None:
if isinstance(self._ds[dsname].time.values[0], np.datetime64):
set_title = ' '.join(
[
dsname,
field,
'on',
dt_utils.numpy_to_arm_date(self._ds[dsname].time.values[0]),
]
)
else:
date_result = search(
r'\d{4}-\d{1,2}-\d{1,2}', self._ds[dsname].time.attrs['units']
)
if date_result is not None:
set_title = ' '.join([dsname, field, 'on', date_result.group(0)])
else:
set_title = ' '.join([dsname, field])
if secondary_y is False:
ax.set_title(set_title)
# Set YTitle
if not y_axis_flag_meanings:
if match_line_label_color and len(ax.get_lines()) > 0:
ax.set_ylabel(ytitle, color=ax.get_lines()[0].get_color())
else:
ax.set_ylabel(ytitle)
# Set X Limit - We want the same time axes for all subplots
if not hasattr(self, 'time_rng'):
if time_rng is not None:
self.time_rng = list(time_rng)
else:
self.time_rng = [xdata.min().values, xdata.max().values]
self.set_xrng(self.time_rng, subplot_index)
# Set Y Limit
if y_rng is not None:
self.set_yrng(y_rng)
if hasattr(self, 'yrng'):
# Make sure that the yrng is not just the default
if ydata is None:
if abs_limits[0] is not None or abs_limits[1] is not None:
our_data = data
else:
our_data = data.values
else:
our_data = ydata
finite = np.isfinite(our_data)
if finite.any():
our_data = our_data[finite]
if invert_y_axis is False:
yrng = [np.min(our_data), np.max(our_data)]
else:
yrng = [np.max(our_data), np.min(our_data)]
else:
yrng = [0, 1]
# Check if current range is outside of new range and only set
# values that work for all data plotted.
if isinstance(yrng[0], np.datetime64):
yrng = mdates.datestr2num([str(yrng[0]), str(yrng[1])])
current_yrng = ax.get_ylim()
if invert_y_axis is False:
if yrng[0] > current_yrng[0]:
yrng[0] = current_yrng[0]
if yrng[1] < current_yrng[1]:
yrng[1] = current_yrng[1]
else:
if yrng[0] < current_yrng[0]:
yrng[0] = current_yrng[0]
if yrng[1] > current_yrng[1]:
yrng[1] = current_yrng[1]
# Set y range the normal way if not secondary y
# If secondary, just use set_ylim
if secondary_y is False:
self.set_yrng(yrng, subplot_index)
else:
ax.set_ylim(yrng)
# Set X Format
if len(subplot_index) == 1:
days = self.xrng[subplot_index, 1] - self.xrng[subplot_index, 0]
else:
days = self.xrng[subplot_index][1] - self.xrng[subplot_index][0]
myFmt = common.get_date_format(days)
ax.xaxis.set_major_formatter(myFmt)
# Set X format - We want the same time axes for all subplots
if not hasattr(self, 'time_fmt'):
self.time_fmt = myFmt
# Put on an xlabel, but only if we are making the bottom-most plot
if subplot_index[0] == self.axes.shape[0] - 1:
ax.set_xlabel('Time [UTC]')
if ydata is not None:
if cbar_label is None:
cbar_title = cbar_default
else:
cbar_title = ''.join(['(', cbar_label, ')'])
if colorbar_labels is not None:
cbar_title = None
cbar = self.add_colorbar(
mesh,
title=cbar_title,
subplot_index=subplot_index,
values=flag_values,
pad=cbar_h_adjust,
)
cbar.set_ticks(flag_values)
cbar.set_ticklabels(flag_meanings)
cbar.ax.tick_params(labelsize=10)
else:
self.add_colorbar(
mesh, title=cbar_title, subplot_index=subplot_index, pad=cbar_h_adjust
)
return ax
def plot_barbs_from_spd_dir(
self, speed_field, direction_field, pres_field=None, dsname=None, **kwargs
):
"""
This procedure will make a wind barb plot timeseries.
If a pressure field is given and the wind fields are 1D, which, for
example, would occur if one wants to plot a timeseries of
rawinsonde data, then a time-height cross section of
winds will be made.
Note: This procedure calls plot_barbs_from_u_v and will take in the
same keyword arguments as that procedure.
Parameters
----------
speed_field : str
The name of the field specifying the wind speed in m/s.
direction_field : str
The name of the field specifying the wind direction in degrees.
0 degrees is defined to be north and increases clockwise like
what is used in standard meteorological notation.
pres_field : str
The name of the field specifying pressure or height. If using
height coordinates, then we recommend setting invert_y_axis
to False.
dsname : str
The name of the datastream to plot. Setting to None will make
ACT attempt to autodetect this.
kwargs : dict
Any additional keyword arguments will be passed into
:func:`act.plotting.TimeSeriesDisplay.plot_barbs_from_u_and_v`.
Returns
-------
the_ax : matplotlib axis handle
The handle to the axis where the plot was made on.
Examples
--------
.. code-block:: python
sonde_ds = act.io.armfiles.read_netcdf(
act.tests.sample_files.EXAMPLE_TWP_SONDE_WILDCARD)
BarbDisplay = act.plotting.TimeSeriesDisplay(
{'sonde_darwin': sonde_ds}, figsize=(10,5))
BarbDisplay.plot_barbs_from_spd_dir('deg', 'wspd', 'pres',
num_barbs_x=20)
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
# Make temporary field called tempu, tempv
spd = self._ds[dsname][speed_field]
dir = self._ds[dsname][direction_field]
tempu = -np.sin(np.deg2rad(dir)) * spd
tempv = -np.cos(np.deg2rad(dir)) * spd
self._ds[dsname]['temp_u'] = deepcopy(self._ds[dsname][speed_field])
self._ds[dsname]['temp_v'] = deepcopy(self._ds[dsname][speed_field])
self._ds[dsname]['temp_u'].values = tempu
self._ds[dsname]['temp_v'].values = tempv
the_ax = self.plot_barbs_from_u_v('temp_u', 'temp_v', pres_field, dsname, **kwargs)
del self._ds[dsname]['temp_u'], self._ds[dsname]['temp_v']
return the_ax
def plot_barbs_from_u_v(
self,
u_field,
v_field,
pres_field=None,
dsname=None,
subplot_index=(0,),
set_title=None,
day_night_background=False,
invert_y_axis=True,
num_barbs_x=20,
num_barbs_y=20,
use_var_for_y=None,
**kwargs,
):
"""
This function will plot a wind barb timeseries from u and v wind
data. If pres_field is given, a time-height series will be plotted
from 1-D wind data.
Parameters
----------
u_field : str
The name of the field containing the U component of the wind.
v_field : str
The name of the field containing the V component of the wind.
pres_field : str or None
The name of the field containing the pressure or height. Set
to None to not use this.
dsname : str or None
The name of the datastream to plot. Setting to None will make
ACT automatically try to determine this.
subplot_index : 2-tuple
The index of the subplot to make the plot on.
set_title : str or None
The title of the plot.
day_night_background : bool
Set to True to plot a day/night background.
invert_y_axis : bool
Set to True to invert the y axis (i.e. for plotting pressure as
the height coordinate).
num_barbs_x : int
The number of wind barbs to plot in the x axis.
num_barbs_y : int
The number of wind barbs to plot in the y axis.
cmap : matplotlib.colors.LinearSegmentedColormap
A color map to use with wind barbs. If this is set the plt.barbs
routine will be passed the C parameter scaled as sqrt of sum of the
squares and used with the passed in color map. A colorbar will also
be added. Setting the limits of the colorbar can be done with 'clim'.
Setting this changes the wind barbs from black to colors.
use_var_for_y : str
Set this to the name of a data variable in the Dataset to use as the
y-axis variable instead of the default dimension. Useful for instances
where data has an index-based dimension instead of a height-based
dimension. If shapes of arrays do not match it will automatically
revert back to the original ydata.
**kwargs : keyword arguments
Additional keyword arguments will be passed into plt.barbs.
Returns
-------
ax : matplotlib axis handle
The axis handle that contains the reference to the
constructed plot.
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
# Get data and dimensions
u = self._ds[dsname][u_field].values
v = self._ds[dsname][v_field].values
dim = list(self._ds[dsname][u_field].dims)
xdata = self._ds[dsname][dim[0]].values
num_x = xdata.shape[-1]
barb_step_x = round(num_x / num_barbs_x)
if barb_step_x == 0:
barb_step_x = 1
if len(dim) > 1 and pres_field is None:
if use_var_for_y is None:
ydata = self._ds[dsname][dim[1]]
else:
ydata = self._ds[dsname][use_var_for_y]
ydata_dim1 = self._ds[dsname][dim[1]]
if np.shape(ydata) != np.shape(ydata_dim1):
ydata = ydata_dim1
if 'units' in ydata.attrs:
units = ydata.attrs['units']
else:
units = ''
ytitle = ''.join(['(', units, ')'])
num_y = ydata.shape[0]
barb_step_y = round(num_y / num_barbs_y)
if barb_step_y == 0:
barb_step_y = 1
xdata, ydata = np.meshgrid(xdata, ydata, indexing='ij')
elif pres_field is not None:
# What we will do here is do a nearest-neighbor interpolation
# for each member of the series. Coordinates are time, pressure
pres = self._ds[dsname][pres_field]
u_interp = NearestNDInterpolator((xdata, pres.values), u, rescale=True)
v_interp = NearestNDInterpolator((xdata, pres.values), v, rescale=True)
barb_step_x = 1
barb_step_y = 1
x_times = pd.date_range(xdata.min(), xdata.max(), periods=num_barbs_x)
if num_barbs_y == 1:
y_levels = pres.mean()
else:
y_levels = np.linspace(np.nanmin(pres), np.nanmax(pres), num_barbs_y)
xdata, ydata = np.meshgrid(x_times, y_levels, indexing='ij')
u = u_interp(xdata, ydata)
v = v_interp(xdata, ydata)
if 'units' in pres.attrs:
units = pres.attrs['units']
else:
units = ''
ytitle = ''.join(['(', units, ')'])
else:
ydata = None
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
if ydata is None:
ydata = np.ones(xdata.shape)
if 'cmap' in kwargs.keys():
map_color = np.sqrt(np.power(u[::barb_step_x], 2) + np.power(v[::barb_step_x], 2))
map_color[np.isnan(map_color)] = 0
ax = self.axes[subplot_index].barbs(
xdata[::barb_step_x],
ydata[::barb_step_x],
u[::barb_step_x],
v[::barb_step_x],
map_color,
**kwargs,
)
plt.colorbar(
ax,
ax=[self.axes[subplot_index]],
label='Wind Speed (' + self._ds[dsname][u_field].attrs['units'] + ')',
)
else:
self.axes[subplot_index].barbs(
xdata[::barb_step_x],
ydata[::barb_step_x],
u[::barb_step_x],
v[::barb_step_x],
**kwargs,
)
self.axes[subplot_index].set_yticks([])
else:
if 'cmap' in kwargs.keys():
map_color = np.sqrt(
np.power(u[::barb_step_x, ::barb_step_y], 2)
+ np.power(v[::barb_step_x, ::barb_step_y], 2)
)
map_color[np.isnan(map_color)] = 0
ax = self.axes[subplot_index].barbs(
xdata[::barb_step_x, ::barb_step_y],
ydata[::barb_step_x, ::barb_step_y],
u[::barb_step_x, ::barb_step_y],
v[::barb_step_x, ::barb_step_y],
map_color,
**kwargs,
)
plt.colorbar(
ax,
ax=[self.axes[subplot_index]],
label='Wind Speed (' + self._ds[dsname][u_field].attrs['units'] + ')',
)
else:
ax = self.axes[subplot_index].barbs(
xdata[::barb_step_x, ::barb_step_y],
ydata[::barb_step_x, ::barb_step_y],
u[::barb_step_x, ::barb_step_y],
v[::barb_step_x, ::barb_step_y],
**kwargs,
)
if day_night_background is True:
self.day_night_background(subplot_index=subplot_index, dsname=dsname)
# Set Title
if set_title is None:
set_title = ' '.join(
[
dsname,
'on',
dt_utils.numpy_to_arm_date(self._ds[dsname].time.values[0]),
]
)
self.axes[subplot_index].set_title(set_title)
# Set YTitle
if 'ytitle' in locals():
self.axes[subplot_index].set_ylabel(ytitle)
# Set X Limit - We want the same time axes for all subplots
time_rng = [xdata.min(), xdata.max()]
self.set_xrng(time_rng, subplot_index)
# Set Y Limit
if hasattr(self, 'yrng'):
# Make sure that the yrng is not just the default
if not np.all(self.yrng[subplot_index] == 0):
self.set_yrng(self.yrng[subplot_index], subplot_index)
else:
if ydata is None:
our_data = xdata
else:
our_data = ydata
if np.isfinite(our_data).any():
if invert_y_axis is False:
yrng = [np.nanmin(our_data), np.nanmax(our_data)]
else:
yrng = [np.nanmax(our_data), np.nanmin(our_data)]
else:
yrng = [0, 1]
self.set_yrng(yrng, subplot_index)
# Set X Format
if len(subplot_index) == 1:
days = self.xrng[subplot_index, 1] - self.xrng[subplot_index, 0]
else:
days = (
self.xrng[subplot_index[0], subplot_index[1], 1]
- self.xrng[subplot_index[0], subplot_index[1], 0]
)
# Put on an xlabel, but only if we are making the bottom-most plot
if subplot_index[0] == self.axes.shape[0] - 1:
self.axes[subplot_index].set_xlabel('Time [UTC]')
myFmt = common.get_date_format(days)
self.axes[subplot_index].xaxis.set_major_formatter(myFmt)
return self.axes[subplot_index]
def plot_time_height_xsection_from_1d_data(
self,
data_field,
pres_field,
dsname=None,
subplot_index=(0,),
set_title=None,
day_night_background=False,
num_time_periods=20,
num_y_levels=20,
invert_y_axis=True,
cbar_label=None,
set_shading='auto',
**kwargs,
):
"""
This will plot a time-height cross section from 1D datasets using
nearest neighbor interpolation on a regular time by height grid.
All that is needed are a data variable and a height variable.
Parameters
----------
data_field : str
The name of the field to plot.
pres_field : str
The name of the height or pressure field to plot.
dsname : str or None
The name of the datastream to plot
subplot_index : 2-tuple
The index of the subplot to create the plot on.
set_title : str or None
The title of the plot.
day_night_background : bool
Set to true to plot the day/night background.
num_time_periods : int
Set to determine how many time periods. Setting to None
will do one time period per day.
num_y_levels : int
The number of levels in the y axis to use.
invert_y_axis : bool
Set to true to invert the y-axis (recommended for
pressure coordinates).
cbar_label : str
Option to overwrite default colorbar label.
set_shading : string
Option to set the matplotlib.pcolormesh shading parameter.
Default to 'auto'
**kwargs : keyword arguments
Additional keyword arguments will be passed
into :func:`plt.pcolormesh`
Returns
-------
ax : matplotlib axis handle
The matplotlib axis handle pointing to the plot.
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
dim = list(self._ds[dsname][data_field].dims)
if len(dim) > 1:
raise ValueError(
'plot_time_height_xsection_from_1d_data only '
'supports 1-D datasets. For datasets with 2 or '
'more dimensions use plot().'
)
# Get data and dimensions
data = self._ds[dsname][data_field].values
xdata = self._ds[dsname][dim[0]].values
# What we will do here is do a nearest-neighbor interpolation for each
# member of the series. Coordinates are time, pressure
pres = self._ds[dsname][pres_field]
u_interp = NearestNDInterpolator((xdata, pres.values), data, rescale=True)
# Mask points where we have no data
# Count number of unique days
x_times = pd.date_range(xdata.min(), xdata.max(), periods=num_time_periods)
y_levels = np.linspace(np.nanmin(pres), np.nanmax(pres), num_y_levels)
tdata, ydata = np.meshgrid(x_times, y_levels, indexing='ij')
data = u_interp(tdata, ydata)
ytitle = ''.join(['(', pres.attrs['units'], ')'])
units = data_field + ' (' + self._ds[dsname][data_field].attrs['units'] + ')'
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
mesh = self.axes[subplot_index].pcolormesh(
x_times, y_levels, np.transpose(data), shading=set_shading, **kwargs
)
if day_night_background is True:
self.day_night_background(subplot_index=subplot_index, dsname=dsname)
# Set Title
if set_title is None:
set_title = ' '.join(
[
dsname,
'on',
dt_utils.numpy_to_arm_date(self._ds[dsname].time.values[0]),
]
)
self.axes[subplot_index].set_title(set_title)
# Set YTitle
if 'ytitle' in locals():
self.axes[subplot_index].set_ylabel(ytitle)
# Set X Limit - We want the same time axes for all subplots
time_rng = [x_times[0], x_times[-1]]
self.set_xrng(time_rng, subplot_index)
# Set Y Limit
if hasattr(self, 'yrng'):
# Make sure that the yrng is not just the default
if not np.all(self.yrng[subplot_index] == 0):
self.set_yrng(self.yrng[subplot_index], subplot_index)
else:
if ydata is None:
our_data = data.values
else:
our_data = ydata
if np.isfinite(our_data).any():
if invert_y_axis is False:
yrng = [np.nanmin(our_data), np.nanmax(our_data)]
else:
yrng = [np.nanmax(our_data), np.nanmin(our_data)]
else:
yrng = [0, 1]
self.set_yrng(yrng, subplot_index)
# Set X Format
if len(subplot_index) == 1:
days = self.xrng[subplot_index, 1] - self.xrng[subplot_index, 0]
else:
days = (
self.xrng[subplot_index[0], subplot_index[1], 1]
- self.xrng[subplot_index[0], subplot_index[1], 0]
)
# Put on an xlabel, but only if we are making the bottom-most plot
if subplot_index[0] == self.axes.shape[0] - 1:
self.axes[subplot_index].set_xlabel('Time [UTC]')
if ydata is not None:
if cbar_label is None:
self.add_colorbar(mesh, title=units, subplot_index=subplot_index)
else:
self.add_colorbar(mesh, title=cbar_label, subplot_index=subplot_index)
myFmt = common.get_date_format(days)
self.axes[subplot_index].xaxis.set_major_formatter(myFmt)
return self.axes[subplot_index]
def time_height_scatter(
self,
data_field=None,
dsname=None,
cmap='rainbow',
alt_label=None,
alt_field='alt',
cb_label=None,
**kwargs,
):
"""
Create a time series plot of altitude and data variable with
color also indicating value with a color bar. The Color bar is
positioned to serve both as the indicator of the color intensity
and the second y-axis.
Parameters
----------
data_field : str
Name of data field in the dataset to plot on second y-axis.
height_field : str
Name of height field in the dataset to plot on first y-axis.
dsname : str or None
The name of the datastream to plot.
cmap : str
Colorbar color map to use.
alt_label : str
Altitude first y-axis label to use. If None, will try to use
long_name and units.
alt_field : str
Label for field in the dataset to plot on first y-axis.
cb_label : str
Colorbar label to use. If not set will try to use
long_name and units.
**kwargs : keyword arguments
Any other keyword arguments that will be passed
into TimeSeriesDisplay.plot module when the figure
is made.
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
# Get data and dimensions
data = self._ds[dsname][data_field]
altitude = self._ds[dsname][alt_field]
dim = list(self._ds[dsname][data_field].dims)
xdata = self._ds[dsname][dim[0]]
if alt_label is None:
try:
alt_label = altitude.attrs['long_name'] + ''.join(
[' (', altitude.attrs['units'], ')']
)
except KeyError:
alt_label = alt_field
if cb_label is None:
try:
cb_label = data.attrs['long_name'] + ''.join([' (', data.attrs['units'], ')'])
except KeyError:
cb_label = data_field
colorbar_map = plt.cm.get_cmap(cmap)
self.fig.subplots_adjust(left=0.1, right=0.86, bottom=0.16, top=0.91)
ax1 = self.plot(alt_field, color='black', **kwargs)
ax1.set_ylabel(alt_label)
ax2 = ax1.twinx()
sc = ax2.scatter(xdata.values, data.values, c=data.values, marker='.', cmap=colorbar_map)
cbaxes = self.fig.add_axes(
[
self.fig.subplotpars.right + 0.02,
self.fig.subplotpars.bottom,
0.02,
self.fig.subplotpars.top - self.fig.subplotpars.bottom,
]
)
cbar = plt.colorbar(sc, cax=cbaxes)
ax2.set_ylim(cbar.mappable.get_clim())
cbar.ax.set_ylabel(cb_label)
ax2.set_yticklabels([])
return self.axes[0]
def qc_flag_block_plot(
self,
data_field=None,
dsname=None,
subplot_index=(0,),
time_rng=None,
assessment_color=None,
edgecolor='face',
set_shading='auto',
cb_friendly=False,
**kwargs,
):
"""
Create a time series plot of embedded quality control values
using broken barh plotting.
Parameters
----------
data_field : str
Name of data field in the dataset to plot corresponding quality
control.
dsname : None or str
If there is more than one datastream in the display object the
name of the datastream needs to be specified. If set to None and
there is only one datastream ACT will use the sole datastream
in the object.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
time_rng : tuple or list
List or tuple with (min, max) values to set the x-axis range limits.
assessment_color : dict
Dictionary lookup to override default assessment to color. Make sure
the assessment word is set correctly with matching case.
edgecolor : str or list
Color name, list of color names or 'face' as defined in matplotlib.axes.Axes.broken_barh
set_shading : string
Option to set the matplotlib.pcolormesh shading parameter.
Default to 'auto'
cb_friendly : boolean
Set to true if you want to use the integrated colorblind friendly
colors for green/red based on the Homeyer colormap
**kwargs : keyword arguments
The keyword arguments for :func:`plt.broken_barh`.
"""
# Color to plot associated with assessment.
color_lookup = {
'Bad': 'red',
'Incorrect': 'red',
'Indeterminate': 'orange',
'Suspect': 'orange',
'Missing': 'darkgray',
'Not Failing': 'green',
'Acceptable': 'green',
}
if cb_friendly:
color_lookup['Bad'] = (0.9285714285714286, 0.7130901016453677, 0.7130901016453677)
color_lookup['Incorrect'] = (0.9285714285714286, 0.7130901016453677, 0.7130901016453677)
color_lookup['Not Failing'] = (0.0, 0.4240129715562796, 0.4240129715562796)
color_lookup['Acceptable'] = (0.0, 0.4240129715562796, 0.4240129715562796)
if assessment_color is not None:
for asses, color in assessment_color.items():
color_lookup[asses] = color
if asses == 'Incorrect':
color_lookup['Bad'] = color
if asses == 'Suspect':
color_lookup['Indeterminate'] = color
# Set up list of test names to use for missing values
missing_val_long_names = [
'Value equal to missing_value*',
'Value set to missing_value*',
'Value is equal to missing_value*',
'Value is set to missing_value*',
]
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
# Set up or get current plot figure
if self.fig is None:
self.fig = plt.figure()
# Set up or get current axes
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
ax = self.axes[subplot_index]
# Set X Limit - We want the same time axes for all subplots
data = self._ds[dsname][data_field]
dim = list(self._ds[dsname][data_field].dims)
xdata = self._ds[dsname][dim[0]]
# Get data and attributes
qc_data_field = self._ds[dsname].qcfilter.check_for_ancillary_qc(
data_field, add_if_missing=False, cleanup=False
)
if qc_data_field is None:
raise ValueError(f'No quality control ancillary variable in Dataset for {data_field}')
flag_masks = self._ds[dsname][qc_data_field].attrs['flag_masks']
flag_meanings = self._ds[dsname][qc_data_field].attrs['flag_meanings']
flag_assessments = self._ds[dsname][qc_data_field].attrs['flag_assessments']
# Get time ranges for green blocks
time_delta = determine_time_delta(xdata.values)
barh_list_green = reduce_time_ranges(xdata.values, time_delta=time_delta, broken_barh=True)
# Set background to gray indicating not available data
ax.set_facecolor('dimgray')
# Check if plotting 2D data vs 1D data. 2D data will be summarized by
# assessment category instead of showing each test.
data_shape = self._ds[dsname][qc_data_field].shape
if len(data_shape) > 1:
cur_assessments = list(set(flag_assessments))
cur_assessments.sort()
cur_assessments.reverse()
qc_data = np.full(data_shape, -1, dtype=np.int16)
plot_colors = []
tick_names = []
index = self._ds[dsname][qc_data_field].values == 0
if index.any():
qc_data[index] = 0
plot_colors.append(color_lookup['Not Failing'])
tick_names.append('Not Failing')
for ii, assess in enumerate(cur_assessments):
if assess not in color_lookup:
color_lookup[assess] = list(mplcolors.CSS4_COLORS.keys())[ii]
ii += 1
assess_data = self._ds[dsname].qcfilter.get_masked_data(
data_field, rm_assessments=assess
)
if assess_data.mask.any():
qc_data[assess_data.mask] = ii
plot_colors.append(color_lookup[assess])
tick_names.append(assess)
# Overwrite missing data. Not sure if we want to do this because VAPs set
# the value to missing but the test is set to Bad. This tries to overcome that
# by looking for correct test description that would only indicate the values
# are missing not that they are set to missing by a test... most likely.
missing_test_nums = []
for ii, flag_meaning in enumerate(flag_meanings):
# Check if the bit set is indicating missing data.
for val in missing_val_long_names:
if re_search(val, flag_meaning):
test_num = parse_bit(flag_masks[ii])[0]
missing_test_nums.append(test_num)
assess_data = self._ds[dsname].qcfilter.get_masked_data(
data_field, rm_tests=missing_test_nums
)
if assess_data.mask.any():
qc_data[assess_data.mask] = -1
plot_colors.append(color_lookup['Missing'])
tick_names.append('Missing')
# Create a masked array to allow not plotting where values are missing
qc_data = np.ma.masked_equal(qc_data, -1)
dims = self._ds[dsname][qc_data_field].dims
xvalues = self._ds[dsname][dims[0]].values
yvalues = self._ds[dsname][dims[1]].values
cMap = mplcolors.ListedColormap(plot_colors)
mesh = ax.pcolormesh(
xvalues,
yvalues,
np.transpose(qc_data),
cmap=cMap,
vmin=0,
shading=set_shading,
)
divider = make_axes_locatable(ax)
# Determine correct placement of words on colorbar
tick_nums = (
np.arange(0, len(tick_names) * 2 + 1) / (len(tick_names) * 2) * np.nanmax(qc_data)
)[1::2]
cax = divider.append_axes('bottom', size='5%', pad=0.3)
cbar = self.fig.colorbar(
mesh,
cax=cax,
orientation='horizontal',
spacing='uniform',
ticks=tick_nums,
shrink=0.5,
)
cbar.ax.set_xticklabels(tick_names)
# Set YTitle
dim_name = list(set(self._ds[dsname][qc_data_field].dims) - {'time'})
try:
ytitle = f"{dim_name[0]} ({self._ds[dsname][dim_name[0]].attrs['units']})"
ax.set_ylabel(ytitle)
except KeyError:
pass
# Add which tests were set as text to the plot
unique_values = []
for ii in np.unique(self._ds[dsname][qc_data_field].values):
unique_values.extend(parse_bit(ii))
if len(unique_values) > 0:
unique_values = list(set(unique_values))
unique_values.sort()
unique_values = [str(ii) for ii in unique_values]
self.fig.text(
0.5,
-0.35,
f"QC Tests Tripped: {', '.join(unique_values)}",
transform=ax.transAxes,
horizontalalignment='center',
verticalalignment='center',
fontweight='bold',
)
else:
test_nums = []
for ii, assess in enumerate(flag_assessments):
if assess not in color_lookup:
color_lookup[assess] = list(mplcolors.CSS4_COLORS.keys())[ii]
# Plot green data first.
ax.broken_barh(
barh_list_green,
(ii, ii + 1),
facecolors=color_lookup['Not Failing'],
edgecolor=edgecolor,
**kwargs,
)
# Get test number from flag_mask bitpacked number
test_nums.append(parse_bit(flag_masks[ii]))
# Get masked array data to use mask for finding if/where test is set
data = self._ds[dsname].qcfilter.get_masked_data(
data_field, rm_tests=test_nums[-1]
)
if np.any(data.mask):
# Get time ranges from time and masked data
barh_list = reduce_time_ranges(
xdata.values[data.mask], time_delta=time_delta, broken_barh=True
)
# Check if the bit set is indicating missing data. If so change
# to different plotting color than what is in flag_assessments.
for val in missing_val_long_names:
if re_search(val, flag_meanings[ii]):
assess = 'Missing'
break
# Lay down blocks of tripped tests using correct color
ax.broken_barh(
barh_list,
(ii, ii + 1),
facecolors=color_lookup[assess],
edgecolor=edgecolor,
**kwargs,
)
# Add test description to plot.
ax.text(xdata.values[0], ii + 0.5, ' ' + flag_meanings[ii], va='center')
# Change y ticks to test number
plt.yticks(
[ii + 0.5 for ii in range(0, len(test_nums))],
labels=['Test ' + str(ii[0]) for ii in test_nums],
)
# Set ylimit to number of tests plotted
ax.set_ylim(0, len(flag_assessments))
# Set X Limit - We want the same time axes for all subplots
if not hasattr(self, 'time_rng'):
if time_rng is not None:
self.time_rng = list(time_rng)
else:
self.time_rng = [xdata.min().values, xdata.max().values]
self.set_xrng(self.time_rng, subplot_index)
# Get X format - We want the same time axes for all subplots
if hasattr(self, 'time_fmt'):
ax.xaxis.set_major_formatter(self.time_fmt)
else:
# Set X Format
if len(subplot_index) == 1:
days = self.xrng[subplot_index, 1] - self.xrng[subplot_index, 0]
else:
days = (
self.xrng[subplot_index[0], subplot_index[1], 1]
- self.xrng[subplot_index[0], subplot_index[1], 0]
)
myFmt = common.get_date_format(days)
ax.xaxis.set_major_formatter(myFmt)
self.time_fmt = myFmt
return self.axes[subplot_index]
def fill_between(
self,
field,
dsname=None,
subplot_index=(0,),
set_title=None,
secondary_y=False,
**kwargs,
):
"""
Makes a fill_between plot, based on matplotlib
Parameters
----------
field : str
The name of the field to plot.
dsname : None or str
If there is more than one datastream in the display object the
name of the datastream needs to be specified. If set to None and
there is only one datastream ACT will use the sole datastream
in the object.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
set_title : str
The title for the plot.
secondary_y : boolean
Option to indicate if the data should be plotted on second y-axis.
**kwargs : keyword arguments
The keyword arguments for :func:`plt.plot` (1D timeseries) or
:func:`plt.pcolormesh` (2D timeseries).
Returns
-------
ax : matplotlib axis handle
The matplotlib axis handle of the plot.
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
# Get data and dimensions
data = self._ds[dsname][field]
dim = list(self._ds[dsname][field].dims)
xdata = self._ds[dsname][dim[0]]
if 'units' in data.attrs:
ytitle = ''.join(['(', data.attrs['units'], ')'])
else:
ytitle = field
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
# Set ax to appropriate axis
if secondary_y is False:
ax = self.axes[subplot_index]
else:
ax = self.axes[subplot_index].twinx()
ax.fill_between(xdata.values, data, **kwargs)
# Set X Format
if len(subplot_index) == 1:
days = self.xrng[subplot_index, 1] - self.xrng[subplot_index, 0]
else:
days = (
self.xrng[subplot_index[0], subplot_index[1], 1]
- self.xrng[subplot_index[0], subplot_index[1], 0]
)
myFmt = common.get_date_format(days)
ax.xaxis.set_major_formatter(myFmt)
# Set X format - We want the same time axes for all subplots
if not hasattr(self, 'time_fmt'):
self.time_fmt = myFmt
# Put on an xlabel, but only if we are making the bottom-most plot
if subplot_index[0] == self.axes.shape[0] - 1:
self.axes[subplot_index].set_xlabel('Time [UTC]')
# Set YTitle
ax.set_ylabel(ytitle)
# Set Title
if set_title is None:
set_title = ' '.join(
[
dsname,
field,
'on',
dt_utils.numpy_to_arm_date(self._ds[dsname].time.values[0]),
]
)
if secondary_y is False:
ax.set_title(set_title)
return self.axes[subplot_index]
<|code_end|>
|
act/plotting/act_cmap.py
<|code_start|>"""
Available colormaps (reversed versions also provided), these
colormaps are available within matplotlib with names 'act_COLORMAP':
* HomeyerRainbow
"""
import matplotlib as mpl
import matplotlib.cm
import matplotlib.colors as colors
from ._act_cmap import datad, yuv_rainbow_24
def _reverser(f):
"""perform reversal."""
def freversed(x):
"""f specific reverser."""
return f(1 - x)
return freversed
def revcmap(data):
"""Can only handle specification *data* in dictionary format."""
data_r = {}
for key, val in data.items():
if callable(val):
valnew = _reverser(val)
# This doesn't work: lambda x: val(1-x)
# The same "val" (the first one) is used
# each time, so the colors are identical
# and the result is shades of gray.
else:
# Flip x and exchange the y values facing x = 0 and x = 1.
valnew = [(1.0 - x, y1, y0) for x, y0, y1 in reversed(val)]
data_r[key] = valnew
return data_r
def _reverse_cmap_spec(spec):
"""Reverses cmap specification *spec*, can handle both dict and tuple
type specs."""
if isinstance(spec, dict) and 'red' in spec.keys():
return revcmap(spec)
else:
revspec = list(reversed(spec))
if len(revspec[0]) == 2: # e.g., (1, (1.0, 0.0, 1.0))
revspec = [(1.0 - a, b) for a, b in revspec]
return revspec
def _generate_cmap(name, lutsize):
"""Generates the requested cmap from it's name *name*. The lut size is
*lutsize*."""
spec = datad[name]
# Generate the colormap object.
if isinstance(spec, dict) and 'red' in spec.keys():
return colors.LinearSegmentedColormap(name, spec, lutsize)
else:
return colors.LinearSegmentedColormap.from_list(name, spec, lutsize)
cmap_d = dict()
LUTSIZE = mpl.rcParams['image.lut']
# need this list because datad is changed in loop
_cmapnames = list(datad.keys())
# Generate the reversed specifications ...
for cmapname in _cmapnames:
spec = datad[cmapname]
spec_reversed = _reverse_cmap_spec(spec)
datad[cmapname + '_r'] = spec_reversed
# Precache the cmaps with ``lutsize = LUTSIZE`` ...
# Use datad.keys() to also add the reversed ones added in the section above:
for cmapname in datad.keys():
cmap_d[cmapname] = _generate_cmap(cmapname, LUTSIZE)
locals().update(cmap_d)
# register the colormaps so that they can be accessed with the names act_XXX
for name, cmap in cmap_d.items():
full_name = 'act_' + name
try:
matplotlib.colormaps.register(name=full_name, cmap=cmap, force=True)
except AttributeError:
matplotlib.cm.register_cmap(name=full_name, cmap=cmap)
<|code_end|>
act/plotting/timeseriesdisplay.py
<|code_start|>"""
Stores the class for TimeSeriesDisplay.
"""
import datetime as dt
import textwrap
import warnings
from copy import deepcopy
from re import search, search as re_search
import matplotlib as mpl
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from matplotlib import colors as mplcolors
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scipy.interpolate import NearestNDInterpolator
from ..qc.qcfilter import parse_bit
from ..utils import data_utils, datetime_utils as dt_utils
from ..utils.datetime_utils import determine_time_delta, reduce_time_ranges
from ..utils.geo_utils import get_sunrise_sunset_noon
from . import common
from .plot import Display
class TimeSeriesDisplay(Display):
"""
This subclass contains routines that are specific to plotting
time series plots from data. It is inherited from Display and therefore
contains all of Display's attributes and methods.
Examples
--------
To create a TimeSeriesDisplay with 3 rows, simply do:
.. code-block:: python
ds = act.read_netcdf(the_file)
disp = act.plotting.TimeSeriesDisplay(ds, subplot_shape=(3,), figsize=(15, 5))
The TimeSeriesDisplay constructor takes in the same keyword arguments as
plt.subplots. For more information on the plt.subplots keyword arguments,
see the `matplotlib documentation
<https://matplotlib.org/api/_as_gen/matplotlib.pyplot.subplots.html>`_.
If no subplot_shape is provided, then no figure or axis will be created
until add_subplots or plots is called.
"""
def __init__(self, ds, subplot_shape=(1,), ds_name=None, **kwargs):
super().__init__(ds, subplot_shape, ds_name, **kwargs)
def day_night_background(self, dsname=None, subplot_index=(0,)):
"""
Colorcodes the background according to sunrise/sunset.
Parameters
----------
dsname : None or str
If there is more than one datastream in the display object the
name of the datastream needs to be specified. If set to None and
there is only one datastream then ACT will use the sole datastream
in the object.
subplot_index : 1 or 2D tuple, list, or array
The index to the subplot to place the day and night background in.
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream to derive the '
+ 'information needed for the day and night '
+ 'background when 2 or more datasets are in '
+ 'the display object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
# Get File Dates
try:
file_dates = self._ds[dsname].attrs['_file_dates']
except KeyError:
file_dates = []
if len(file_dates) == 0:
sdate = dt_utils.numpy_to_arm_date(self._ds[dsname].time.values[0])
edate = dt_utils.numpy_to_arm_date(self._ds[dsname].time.values[-1])
file_dates = [sdate, edate]
all_dates = dt_utils.dates_between(file_dates[0], file_dates[-1])
if self.axes is None:
raise RuntimeError('day_night_background requires the plot to ' 'be displayed.')
ax = self.axes[subplot_index]
# Find variable names for latitude and longitude
variables = list(self._ds[dsname].data_vars)
lat_name = [var for var in ['lat', 'latitude'] if var in variables]
lon_name = [var for var in ['lon', 'longitude'] if var in variables]
if len(lat_name) == 0:
lat_name = None
else:
lat_name = lat_name[0]
if len(lon_name) == 0:
lon_name = None
else:
lon_name = lon_name[0]
# Variable name does not match, look for standard_name declaration
if lat_name is None or lon_name is None:
for var in variables:
try:
if self._ds[dsname][var].attrs['standard_name'] == 'latitude':
lat_name = var
except KeyError:
pass
try:
if self._ds[dsname][var].attrs['standard_name'] == 'longitude':
lon_name = var
except KeyError:
pass
if lat_name is not None and lon_name is not None:
break
if lat_name is None or lon_name is None:
return
# Extract latitude and longitude scalar from variable. If variable is a vector look
# for first non-Nan value.
lat_lon_list = [np.nan, np.nan]
for ii, var_name in enumerate([lat_name, lon_name]):
try:
values = self._ds[dsname][var_name].values
if values.size == 1:
lat_lon_list[ii] = float(values)
else:
# Look for non-NaN values to use for latitude location. If not found use first value.
index = np.where(np.isfinite(values))[0]
if index.size == 0:
lat_lon_list[ii] = float(values[0])
else:
lat_lon_list[ii] = float(values[index[0]])
except AttributeError:
pass
for value, name in zip(lat_lon_list, ['Latitude', 'Longitude']):
if not np.isfinite(value):
warnings.warn(f"{name} value in dataset equal to '{value}' is not finite. ", RuntimeWarning)
return
lat = lat_lon_list[0]
lon = lat_lon_list[1]
lat_range = [-90, 90]
if not (lat_range[0] <= lat <= lat_range[1]):
warnings.warn(
f"Latitude value in dataset of '{lat}' not within acceptable "
f'range of {lat_range[0]} <= latitude <= {lat_range[1]}. ',
RuntimeWarning,
)
return
lon_range = [-180, 180]
if not (lon_range[0] <= lon <= lon_range[1]):
warnings.warn(
f"Longitude value in dataset of '{lon}' not within acceptable "
f'range of {lon_range[0]} <= longitude <= {lon_range[1]}. ',
RuntimeWarning,
)
return
# Initialize the plot to a gray background for total darkness
rect = ax.patch
rect.set_facecolor('0.85')
# Get date ranges to plot
plot_dates = []
for f in all_dates:
for ii in [-1, 0, 1]:
plot_dates.append(f + dt.timedelta(days=ii))
# Get sunrise, sunset and noon times
sunrise, sunset, noon = get_sunrise_sunset_noon(lat, lon, plot_dates)
# Plot daylight
for ii in range(0, len(sunrise)):
ax.axvspan(sunrise[ii], sunset[ii], facecolor='#FFFFCC', zorder=0)
# Plot noon line
for ii in noon:
ax.axvline(x=ii, linestyle='--', color='y', zorder=1)
def set_xrng(self, xrng, subplot_index=(0,)):
"""
Sets the x range of the plot.
Parameters
----------
xrng : 2 number array
The x limits of the plot.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
"""
if self.axes is None:
raise RuntimeError('set_xrng requires the plot to be displayed.')
# If the xlim is set to the same value for range it will throw a warning
# This is to catch that and expand the range so we avoid the warning.
if xrng[0] == xrng[1]:
if isinstance(xrng[0], np.ndarray) and np.issubdtype(xrng[0].dtype, np.datetime64):
print(f'\nAttempting to set xlim range to single value {xrng[0]}. '
'Expanding range by 2 seconds.\n')
xrng[0] = xrng[0] - np.timedelta64(1, 's')
xrng[1] = xrng[1] + np.timedelta64(1, 's')
elif isinstance(xrng[0], dt.datetime):
print(f'\nAttempting to set xlim range to single value {xrng[0]}. '
'Expanding range by 2 seconds.\n')
xrng[0] = xrng[0] - dt.timedelta(seconds=1)
xrng[1] = xrng[1] + dt.timedelta(seconds=1)
self.axes[subplot_index].set_xlim(xrng)
# Make sure that the xrng value is a numpy array not pandas
if isinstance(xrng[0], pd.Timestamp):
xrng = [x.to_numpy() for x in xrng if isinstance(x, pd.Timestamp)]
# Make sure that the xrng value is a numpy array not datetime.datetime
if isinstance(xrng[0], dt.datetime):
xrng = [np.datetime64(x) for x in xrng if isinstance(x, dt.datetime)]
if len(subplot_index) < 2:
self.xrng[subplot_index, 0] = xrng[0].astype('datetime64[D]').astype(float)
self.xrng[subplot_index, 1] = xrng[1].astype('datetime64[D]').astype(float)
else:
self.xrng[subplot_index][0] = xrng[0].astype('datetime64[D]').astype(float)
self.xrng[subplot_index][1] = xrng[1].astype('datetime64[D]').astype(float)
def set_yrng(self, yrng, subplot_index=(0,)):
"""
Sets the y range of the plot.
Parameters
----------
yrng : 2 number array
The y limits of the plot.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
"""
if self.axes is None:
raise RuntimeError('set_yrng requires the plot to be displayed.')
if not hasattr(self, 'yrng') and len(self.axes.shape) == 2:
self.yrng = np.zeros((self.axes.shape[0], self.axes.shape[1], 2))
elif not hasattr(self, 'yrng') and len(self.axes.shape) == 1:
self.yrng = np.zeros((self.axes.shape[0], 2))
if yrng[0] == yrng[1]:
yrng[1] = yrng[1] + 1
self.axes[subplot_index].set_ylim(yrng)
try:
self.yrng[subplot_index, :] = yrng
except IndexError:
self.yrng[subplot_index] = yrng
def plot(
self,
field,
dsname=None,
subplot_index=(0,),
cmap=None,
set_title=None,
add_nan=False,
day_night_background=False,
invert_y_axis=False,
abs_limits=(None, None),
time_rng=None,
y_rng=None,
use_var_for_y=None,
set_shading='auto',
assessment_overplot=False,
overplot_marker='.',
overplot_behind=False,
overplot_markersize=6,
assessment_overplot_category={
'Incorrect': ['Bad', 'Incorrect'],
'Suspect': ['Indeterminate', 'Suspect'],
},
assessment_overplot_category_color={'Incorrect': 'red', 'Suspect': 'orange'},
force_line_plot=False,
labels=False,
cbar_label=None,
cbar_h_adjust=None,
secondary_y=False,
y_axis_flag_meanings=False,
colorbar_labels=None,
cb_friendly=False,
match_line_label_color=False,
**kwargs,
):
"""
Makes a timeseries plot. If subplots have not been added yet, an axis
will be created assuming that there is only going to be one plot.
If plotting a high data volume 2D dataset, it may take some time to plot.
In order to speed up your plot creation, please resample your data to a
lower resolution dataset.
Parameters
----------
field : str
The name of the field to plot.
dsname : None or str
If there is more than one datastream in the display object the
name of the datastream needs to be specified. If set to None and
there is only one datastream ACT will use the sole datastream
in the object.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
cmap : matplotlib colormap
The colormap to use.
set_title : str
The title for the plot.
add_nan : bool
Set to True to fill in data gaps with NaNs.
day_night_background : bool
Set to True to fill in a color-coded background
according to the time of day.
abs_limits : tuple or list
Sets the bounds on plot limits even if data values exceed
those limits. Set to (ymin,ymax). Use None if only setting
minimum or maximum limit, i.e. (22., None).
time_rng : tuple or list
List or tuple with (min, max) values to set the x-axis range
limits.
y_rng : tuple or list
List or tuple with (min, max) values to set the y-axis range limits.
use_var_for_y : str
Set this to the name of a data variable in the Dataset to use as
the y-axis variable instead of the default dimension. Useful for
instances where data has an index-based dimension instead of a
height-based dimension. If shapes of arrays do not match it will
automatically revert back to the original ydata.
set_shading : string
Option to set the matplotlib.pcolormesh shading parameter.
Defaults to 'auto'.
assessment_overplot : boolean
Option to overplot quality control colored symbols over plotted
data using flag_assessment categories.
overplot_marker : str
Marker to use for overplot symbol.
overplot_behind : bool
Place the overplot marker behind the data point.
overplot_markersize : float or int
Size of overplot marker. If overplot_behind or force_line_plot
are set the marker size will be double overplot_markersize so
the color is visible.
assessment_overplot_category : dict
Lookup to categorize assessments into groups. This allows using
multiple terms for the same quality control level of failure.
Also allows adding more to the defaults.
assessment_overplot_category_color : dict
Lookup to match overplot category color to assessment grouping.
force_line_plot : boolean
Option to plot 2D data as 1D line plots.
labels : boolean or list
Option to overwrite the legend labels. Must have same dimensions as
number of lines plotted.
cbar_label : str
Option to overwrite default colorbar label.
cbar_h_adjust : float
Option to adjust location of colorbar horizontally. Positive values
move to right negative values move to left.
secondary_y : boolean
Option to plot on secondary y axis.
y_axis_flag_meanings : boolean or int
When set to True and plotting state variable with flag_values and
flag_meanings attributes will replace y axis numerical values
with flag_meanings value. Set to a positive number larger than 1
to indicate the maximum word length to use. If the text is longer than the
value and contains space characters, it will be split over multiple lines.
colorbar_labels : dict
A dictionary containing values for plotting a 2D array of state variables.
The dictionary uses data values as keys and a dictionary containing keys
'text' and 'color' for each data value to plot.
Example:
{0: {'text': 'Clear sky', 'color': 'white'},
1: {'text': 'Liquid', 'color': 'green'},
2: {'text': 'Ice', 'color': 'blue'},
3: {'text': 'Mixed phase', 'color': 'purple'}}
cb_friendly : boolean
Set to true if you want to use the integrated colorblind friendly
colors for green/red based on the Homeyer colormap.
match_line_label_color : boolean
Will set the y label to match the line color in the plot. This
will only work if the time series plot is a line plot.
**kwargs : keyword arguments
The keyword arguments for :func:`plt.plot` (1D timeseries) or
:func:`plt.pcolormesh` (2D timeseries).
Returns
-------
ax : matplotlib axis handle
The matplotlib axis handle of the plot.
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
if y_axis_flag_meanings:
kwargs['linestyle'] = ''
if cb_friendly:
cmap = 'act_HomeyerRainbow'
assessment_overplot_category_color['Bad'] = (0.9285714285714286, 0.7130901016453677, 0.7130901016453677)
assessment_overplot_category_color['Incorrect'] = (0.9285714285714286, 0.7130901016453677, 0.7130901016453677)
assessment_overplot_category_color['Not Failing'] = (0.0, 0.4240129715562796, 0.4240129715562796)
assessment_overplot_category_color['Acceptable'] = (0.0, 0.4240129715562796, 0.4240129715562796)
# Get data and dimensions
data = self._ds[dsname][field]
dim = list(self._ds[dsname][field].dims)
xdata = self._ds[dsname][dim[0]]
if 'units' in data.attrs:
ytitle = ''.join(['(', data.attrs['units'], ')'])
else:
ytitle = field
if cbar_label is None:
cbar_default = ytitle
if len(dim) > 1:
if use_var_for_y is None:
ydata = self._ds[dsname][dim[1]]
else:
ydata = self._ds[dsname][use_var_for_y]
ydata_dim1 = self._ds[dsname][dim[1]]
if np.shape(ydata) != np.shape(ydata_dim1):
ydata = ydata_dim1
units = ytitle
if 'units' in ydata.attrs.keys():
units = ydata.attrs['units']
ytitle = ''.join(['(', units, ')'])
else:
units = ''
ytitle = dim[1]
# Create labels if 2d as 1d
if force_line_plot is True:
if labels is True:
labels = [' '.join([str(d), units]) for d in ydata.values]
ytitle = f"({data.attrs['units']})"
ydata = None
else:
ydata = None
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
# Set up secondary y axis if requested
if secondary_y is False:
ax = self.axes[subplot_index]
else:
ax = self.axes[subplot_index].twinx()
if colorbar_labels is not None:
flag_values = list(colorbar_labels.keys())
flag_meanings = [value['text'] for key, value in colorbar_labels.items()]
cbar_colors = [value['color'] for key, value in colorbar_labels.items()]
cmap = mpl.colors.ListedColormap(cbar_colors)
for ii, flag_meaning in enumerate(flag_meanings):
if len(flag_meaning) > 20:
flag_meaning = textwrap.fill(flag_meaning, width=20)
flag_meanings[ii] = flag_meaning
else:
flag_values = None
flag_meanings = None
cbar_colors = None
if ydata is None:
# Add in nans to ensure the data does not connect the line.
if add_nan is True:
xdata, data = data_utils.add_in_nan(xdata, data)
if day_night_background is True:
self.day_night_background(subplot_index=subplot_index, dsname=dsname)
# If limiting data being plotted use masked arrays
# Need to do it this way because of autoscale() method
if abs_limits[0] is not None and abs_limits[1] is not None:
data = np.ma.masked_outside(data, abs_limits[0], abs_limits[1])
elif abs_limits[0] is not None and abs_limits[1] is None:
data = np.ma.masked_less_equal(data, abs_limits[0])
elif abs_limits[0] is None and abs_limits[1] is not None:
data = np.ma.masked_greater_equal(data, abs_limits[1])
# Plot the data
if 'marker' not in kwargs.keys():
kwargs['marker'] = '.'
lines = ax.plot(xdata, data, **kwargs)
# Check if we need to call legend method after plotting. This is only
# called when no assessment overplot is called.
add_legend = False
if 'label' in kwargs.keys():
add_legend = True
# Overplot failing data if requested
if assessment_overplot:
# If we are doing a forced line plot from 2D data we need to manage
# legend labels. Make arrays to hold the labels of failing QC
# because they are not set when the labels keyword is not provided.
if not isinstance(labels, list) and add_legend is False:
labels = []
lines = []
# For forced line plot need to plot QC behind point instead of
# on top of point.
zorder = None
if force_line_plot or overplot_behind:
zorder = 0
overplot_markersize *= 2.0
for assessment, categories in assessment_overplot_category.items():
flag_data = self._ds[dsname].qcfilter.get_masked_data(
field, rm_assessments=categories, return_inverse=True
)
if np.invert(flag_data.mask).any() and np.isfinite(flag_data).any():
try:
flag_data.mask = np.logical_or(data.mask, flag_data.mask)
except AttributeError:
pass
qc_ax = ax.plot(
xdata,
flag_data,
marker=overplot_marker,
linestyle='',
markersize=overplot_markersize,
color=assessment_overplot_category_color[assessment],
label=assessment,
zorder=zorder,
)
# If labels keyword is set need to add labels for calling legend
if isinstance(labels, list):
# If plotting forced_line_plot need to subset the Line2D object
# so we don't have more than one added to legend.
if len(qc_ax) > 1:
lines.extend(qc_ax[:1])
else:
lines.extend(qc_ax)
labels.append(assessment)
add_legend = True
# Add legend if labels are available
if isinstance(labels, list):
ax.legend(lines, labels)
elif add_legend:
ax.legend()
# Change y axis to text from flag_meanings if requested.
if y_axis_flag_meanings:
flag_meanings = self._ds[dsname][field].attrs['flag_meanings']
flag_values = self._ds[dsname][field].attrs['flag_values']
# If keyword is larger than 1 assume this is the maximum character length
# desired and insert returns to wrap text.
if y_axis_flag_meanings > 1:
for ii, flag_meaning in enumerate(flag_meanings):
if len(flag_meaning) > y_axis_flag_meanings:
flag_meaning = textwrap.fill(flag_meaning, width=y_axis_flag_meanings)
flag_meanings[ii] = flag_meaning
ax.set_yticks(flag_values)
ax.set_yticklabels(flag_meanings)
else:
# Add in nans to ensure the data are not streaking
if add_nan is True:
xdata, data = data_utils.add_in_nan(xdata, data)
# Sets shading parameter to auto. Matplotlib will check dimensions.
# If X, Y and C have the same dimensions shading is set to nearest.
# If X and Y dimensions are 1 greater than C shading is set to flat.
if 'edgecolors' not in kwargs.keys():
kwargs['edgecolors'] = 'face'
mesh = ax.pcolormesh(
np.asarray(xdata),
ydata,
data.transpose(),
shading=set_shading,
cmap=cmap,
**kwargs,
)
# Set Title
if set_title is None:
if isinstance(self._ds[dsname].time.values[0], np.datetime64):
set_title = ' '.join(
[
dsname,
field,
'on',
dt_utils.numpy_to_arm_date(self._ds[dsname].time.values[0]),
]
)
else:
date_result = search(
r'\d{4}-\d{1,2}-\d{1,2}', self._ds[dsname].time.attrs['units']
)
if date_result is not None:
set_title = ' '.join([dsname, field, 'on', date_result.group(0)])
else:
set_title = ' '.join([dsname, field])
if secondary_y is False:
ax.set_title(set_title)
# Set YTitle
if not y_axis_flag_meanings:
if match_line_label_color and len(ax.get_lines()) > 0:
ax.set_ylabel(ytitle, color=ax.get_lines()[0].get_color())
else:
ax.set_ylabel(ytitle)
# Set X Limit - We want the same time axes for all subplots
if not hasattr(self, 'time_rng'):
if time_rng is not None:
self.time_rng = list(time_rng)
else:
self.time_rng = [xdata.min().values, xdata.max().values]
self.set_xrng(self.time_rng, subplot_index)
# Set Y Limit
if y_rng is not None:
self.set_yrng(y_rng)
if hasattr(self, 'yrng'):
# Make sure that the yrng is not just the default
if ydata is None:
if abs_limits[0] is not None or abs_limits[1] is not None:
our_data = data
else:
our_data = data.values
else:
our_data = ydata
finite = np.isfinite(our_data)
if finite.any():
our_data = our_data[finite]
if invert_y_axis is False:
yrng = [np.min(our_data), np.max(our_data)]
else:
yrng = [np.max(our_data), np.min(our_data)]
else:
yrng = [0, 1]
# Check if current range is outside of new range and only set
# values that work for all data plotted.
if isinstance(yrng[0], np.datetime64):
yrng = mdates.datestr2num([str(yrng[0]), str(yrng[1])])
current_yrng = ax.get_ylim()
if invert_y_axis is False:
if yrng[0] > current_yrng[0]:
yrng[0] = current_yrng[0]
if yrng[1] < current_yrng[1]:
yrng[1] = current_yrng[1]
else:
if yrng[0] < current_yrng[0]:
yrng[0] = current_yrng[0]
if yrng[1] > current_yrng[1]:
yrng[1] = current_yrng[1]
# Set y range the normal way if not secondary y
# If secondary, just use set_ylim
if secondary_y is False:
self.set_yrng(yrng, subplot_index)
else:
ax.set_ylim(yrng)
# Set X Format
if len(subplot_index) == 1:
days = self.xrng[subplot_index, 1] - self.xrng[subplot_index, 0]
else:
days = self.xrng[subplot_index][1] - self.xrng[subplot_index][0]
myFmt = common.get_date_format(days)
ax.xaxis.set_major_formatter(myFmt)
# Set X format - We want the same time axes for all subplots
if not hasattr(self, 'time_fmt'):
self.time_fmt = myFmt
# Put on an xlabel, but only if we are making the bottom-most plot
if subplot_index[0] == self.axes.shape[0] - 1:
ax.set_xlabel('Time [UTC]')
if ydata is not None:
if cbar_label is None:
cbar_title = cbar_default
else:
cbar_title = ''.join(['(', cbar_label, ')'])
if colorbar_labels is not None:
cbar_title = None
cbar = self.add_colorbar(
mesh,
title=cbar_title,
subplot_index=subplot_index,
values=flag_values,
pad=cbar_h_adjust,
)
cbar.set_ticks(flag_values)
cbar.set_ticklabels(flag_meanings)
cbar.ax.tick_params(labelsize=10)
else:
self.add_colorbar(
mesh, title=cbar_title, subplot_index=subplot_index, pad=cbar_h_adjust
)
return ax
def plot_barbs_from_spd_dir(
self, speed_field, direction_field, pres_field=None, dsname=None, **kwargs
):
"""
This procedure will make a wind barb plot timeseries.
If a pressure field is given and the wind fields are 1D, which, for
example, would occur if one wants to plot a timeseries of
rawinsonde data, then a time-height cross section of
winds will be made.
Note: This procedure calls plot_barbs_from_u_v and will take in the
same keyword arguments as that procedure.
Parameters
----------
speed_field : str
The name of the field specifying the wind speed in m/s.
direction_field : str
The name of the field specifying the wind direction in degrees.
0 degrees is defined to be north and increases clockwise like
what is used in standard meteorological notation.
pres_field : str
The name of the field specifying pressure or height. If using
height coordinates, then we recommend setting invert_y_axis
to False.
dsname : str
The name of the datastream to plot. Setting to None will make
ACT attempt to autodetect this.
kwargs : dict
Any additional keyword arguments will be passed into
:func:`act.plotting.TimeSeriesDisplay.plot_barbs_from_u_v`.
Returns
-------
the_ax : matplotlib axis handle
The handle to the axis where the plot was made.
Examples
--------
.. code-block:: python
sonde_ds = act.io.armfiles.read_netcdf(
act.tests.sample_files.EXAMPLE_TWP_SONDE_WILDCARD)
BarbDisplay = act.plotting.TimeSeriesDisplay(
{'sonde_darwin': sonde_ds}, figsize=(10,5))
BarbDisplay.plot_barbs_from_spd_dir('wspd', 'deg', 'pres',
num_barbs_x=20)
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
# Make temporary field called tempu, tempv
spd = self._ds[dsname][speed_field]
dir = self._ds[dsname][direction_field]
tempu = -np.sin(np.deg2rad(dir)) * spd
tempv = -np.cos(np.deg2rad(dir)) * spd
self._ds[dsname]['temp_u'] = deepcopy(self._ds[dsname][speed_field])
self._ds[dsname]['temp_v'] = deepcopy(self._ds[dsname][speed_field])
self._ds[dsname]['temp_u'].values = tempu
self._ds[dsname]['temp_v'].values = tempv
the_ax = self.plot_barbs_from_u_v('temp_u', 'temp_v', pres_field, dsname, **kwargs)
del self._ds[dsname]['temp_u'], self._ds[dsname]['temp_v']
return the_ax
def plot_barbs_from_u_v(
self,
u_field,
v_field,
pres_field=None,
dsname=None,
subplot_index=(0,),
set_title=None,
day_night_background=False,
invert_y_axis=True,
num_barbs_x=20,
num_barbs_y=20,
use_var_for_y=None,
**kwargs,
):
"""
This function will plot a wind barb timeseries from u and v wind
data. If pres_field is given, a time-height series will be plotted
from 1-D wind data.
Parameters
----------
u_field : str
The name of the field containing the U component of the wind.
v_field : str
The name of the field containing the V component of the wind.
pres_field : str or None
The name of the field containing the pressure or height. Set
to None to not use this.
dsname : str or None
The name of the datastream to plot. Setting to None will make
ACT automatically try to determine this.
subplot_index : 2-tuple
The index of the subplot to make the plot on.
set_title : str or None
The title of the plot.
day_night_background : bool
Set to True to plot a day/night background.
invert_y_axis : bool
Set to True to invert the y axis (i.e. for plotting pressure as
the height coordinate).
num_barbs_x : int
The number of wind barbs to plot in the x axis.
num_barbs_y : int
The number of wind barbs to plot in the y axis.
cmap : matplotlib.colors.LinearSegmentedColormap
A color map to use with wind barbs. If this is set the plt.barbs
routine will be passed the C parameter scaled as sqrt of sum of the
squares and used with the passed in color map. A colorbar will also
be added. Setting the limits of the colorbar can be done with 'clim'.
Setting this changes the wind barbs from black to colors.
use_var_for_y : str
Set this to the name of a data variable in the Dataset to use as the
y-axis variable instead of the default dimension. Useful for instances
where data has an index-based dimension instead of a height-based
dimension. If shapes of arrays do not match it will automatically
revert back to the original ydata.
**kwargs : keyword arguments
Additional keyword arguments will be passed into plt.barbs.
Returns
-------
ax : matplotlib axis handle
The axis handle that contains the reference to the
constructed plot.
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
# Get data and dimensions
u = self._ds[dsname][u_field].values
v = self._ds[dsname][v_field].values
dim = list(self._ds[dsname][u_field].dims)
xdata = self._ds[dsname][dim[0]].values
num_x = xdata.shape[-1]
barb_step_x = round(num_x / num_barbs_x)
if barb_step_x == 0:
barb_step_x = 1
if len(dim) > 1 and pres_field is None:
if use_var_for_y is None:
ydata = self._ds[dsname][dim[1]]
else:
ydata = self._ds[dsname][use_var_for_y]
ydata_dim1 = self._ds[dsname][dim[1]]
if np.shape(ydata) != np.shape(ydata_dim1):
ydata = ydata_dim1
if 'units' in ydata.attrs:
units = ydata.attrs['units']
else:
units = ''
ytitle = ''.join(['(', units, ')'])
num_y = ydata.shape[0]
barb_step_y = round(num_y / num_barbs_y)
if barb_step_y == 0:
barb_step_y = 1
xdata, ydata = np.meshgrid(xdata, ydata, indexing='ij')
elif pres_field is not None:
# What we will do here is do a nearest-neighbor interpolation
# for each member of the series. Coordinates are time, pressure
pres = self._ds[dsname][pres_field]
u_interp = NearestNDInterpolator((xdata, pres.values), u, rescale=True)
v_interp = NearestNDInterpolator((xdata, pres.values), v, rescale=True)
barb_step_x = 1
barb_step_y = 1
x_times = pd.date_range(xdata.min(), xdata.max(), periods=num_barbs_x)
if num_barbs_y == 1:
y_levels = pres.mean()
else:
y_levels = np.linspace(np.nanmin(pres), np.nanmax(pres), num_barbs_y)
xdata, ydata = np.meshgrid(x_times, y_levels, indexing='ij')
u = u_interp(xdata, ydata)
v = v_interp(xdata, ydata)
if 'units' in pres.attrs:
units = pres.attrs['units']
else:
units = ''
ytitle = ''.join(['(', units, ')'])
else:
ydata = None
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
if ydata is None:
ydata = np.ones(xdata.shape)
if 'cmap' in kwargs.keys():
map_color = np.sqrt(np.power(u[::barb_step_x], 2) + np.power(v[::barb_step_x], 2))
map_color[np.isnan(map_color)] = 0
ax = self.axes[subplot_index].barbs(
xdata[::barb_step_x],
ydata[::barb_step_x],
u[::barb_step_x],
v[::barb_step_x],
map_color,
**kwargs,
)
plt.colorbar(
ax,
ax=[self.axes[subplot_index]],
label='Wind Speed (' + self._ds[dsname][u_field].attrs['units'] + ')',
)
else:
self.axes[subplot_index].barbs(
xdata[::barb_step_x],
ydata[::barb_step_x],
u[::barb_step_x],
v[::barb_step_x],
**kwargs,
)
self.axes[subplot_index].set_yticks([])
else:
if 'cmap' in kwargs.keys():
map_color = np.sqrt(
np.power(u[::barb_step_x, ::barb_step_y], 2)
+ np.power(v[::barb_step_x, ::barb_step_y], 2)
)
map_color[np.isnan(map_color)] = 0
ax = self.axes[subplot_index].barbs(
xdata[::barb_step_x, ::barb_step_y],
ydata[::barb_step_x, ::barb_step_y],
u[::barb_step_x, ::barb_step_y],
v[::barb_step_x, ::barb_step_y],
map_color,
**kwargs,
)
plt.colorbar(
ax,
ax=[self.axes[subplot_index]],
label='Wind Speed (' + self._ds[dsname][u_field].attrs['units'] + ')',
)
else:
ax = self.axes[subplot_index].barbs(
xdata[::barb_step_x, ::barb_step_y],
ydata[::barb_step_x, ::barb_step_y],
u[::barb_step_x, ::barb_step_y],
v[::barb_step_x, ::barb_step_y],
**kwargs,
)
if day_night_background is True:
self.day_night_background(subplot_index=subplot_index, dsname=dsname)
# Set Title
if set_title is None:
set_title = ' '.join(
[
dsname,
'on',
dt_utils.numpy_to_arm_date(self._ds[dsname].time.values[0]),
]
)
self.axes[subplot_index].set_title(set_title)
# Set YTitle
if 'ytitle' in locals():
self.axes[subplot_index].set_ylabel(ytitle)
# Set X Limit - We want the same time axes for all subplots
time_rng = [xdata.min(), xdata.max()]
self.set_xrng(time_rng, subplot_index)
# Set Y Limit
if hasattr(self, 'yrng'):
# Make sure that the yrng is not just the default
if not np.all(self.yrng[subplot_index] == 0):
self.set_yrng(self.yrng[subplot_index], subplot_index)
else:
if ydata is None:
our_data = xdata
else:
our_data = ydata
if np.isfinite(our_data).any():
if invert_y_axis is False:
yrng = [np.nanmin(our_data), np.nanmax(our_data)]
else:
yrng = [np.nanmax(our_data), np.nanmin(our_data)]
else:
yrng = [0, 1]
self.set_yrng(yrng, subplot_index)
# Set X Format
if len(subplot_index) == 1:
days = self.xrng[subplot_index, 1] - self.xrng[subplot_index, 0]
else:
days = (
self.xrng[subplot_index[0], subplot_index[1], 1]
- self.xrng[subplot_index[0], subplot_index[1], 0]
)
# Put on an xlabel, but only if we are making the bottom-most plot
if subplot_index[0] == self.axes.shape[0] - 1:
self.axes[subplot_index].set_xlabel('Time [UTC]')
myFmt = common.get_date_format(days)
self.axes[subplot_index].xaxis.set_major_formatter(myFmt)
return self.axes[subplot_index]
def plot_time_height_xsection_from_1d_data(
self,
data_field,
pres_field,
dsname=None,
subplot_index=(0,),
set_title=None,
day_night_background=False,
num_time_periods=20,
num_y_levels=20,
invert_y_axis=True,
cbar_label=None,
set_shading='auto',
**kwargs,
):
"""
This will plot a time-height cross section from 1D datasets using
nearest neighbor interpolation on a regular time by height grid.
All that is needed are a data variable and a height variable.
Parameters
----------
data_field : str
The name of the field to plot.
pres_field : str
The name of the height or pressure field to plot.
dsname : str or None
The name of the datastream to plot
subplot_index : 2-tuple
The index of the subplot to create the plot on.
set_title : str or None
The title of the plot.
day_night_background : bool
Set to true to plot the day/night background.
num_time_periods : int
Set to determine how many time periods. Setting to None
will do one time period per day.
num_y_levels : int
The number of levels in the y axis to use.
invert_y_axis : bool
Set to true to invert the y-axis (recommended for
pressure coordinates).
cbar_label : str
Option to overwrite default colorbar label.
set_shading : string
Option to set the matplotlib.pcolormesh shading parameter.
Defaults to 'auto'.
**kwargs : keyword arguments
Additional keyword arguments will be passed
into :func:`plt.pcolormesh`
Returns
-------
ax : matplotlib axis handle
The matplotlib axis handle pointing to the plot.
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
dim = list(self._ds[dsname][data_field].dims)
if len(dim) > 1:
raise ValueError(
'plot_time_height_xsection_from_1d_data only '
'supports 1-D datasets. For datasets with 2 or '
'more dimensions use plot().'
)
# Get data and dimensions
data = self._ds[dsname][data_field].values
xdata = self._ds[dsname][dim[0]].values
# What we will do here is do a nearest-neighbor interpolation for each
# member of the series. Coordinates are time, pressure
pres = self._ds[dsname][pres_field]
u_interp = NearestNDInterpolator((xdata, pres.values), data, rescale=True)
# Mask points where we have no data
# Count number of unique days
x_times = pd.date_range(xdata.min(), xdata.max(), periods=num_time_periods)
y_levels = np.linspace(np.nanmin(pres), np.nanmax(pres), num_y_levels)
tdata, ydata = np.meshgrid(x_times, y_levels, indexing='ij')
data = u_interp(tdata, ydata)
ytitle = ''.join(['(', pres.attrs['units'], ')'])
units = data_field + ' (' + self._ds[dsname][data_field].attrs['units'] + ')'
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
mesh = self.axes[subplot_index].pcolormesh(
x_times, y_levels, np.transpose(data), shading=set_shading, **kwargs
)
if day_night_background is True:
self.day_night_background(subplot_index=subplot_index, dsname=dsname)
# Set Title
if set_title is None:
set_title = ' '.join(
[
dsname,
'on',
dt_utils.numpy_to_arm_date(self._ds[dsname].time.values[0]),
]
)
self.axes[subplot_index].set_title(set_title)
# Set YTitle
if 'ytitle' in locals():
self.axes[subplot_index].set_ylabel(ytitle)
# Set X Limit - We want the same time axes for all subplots
time_rng = [x_times[0], x_times[-1]]
self.set_xrng(time_rng, subplot_index)
# Set Y Limit
if hasattr(self, 'yrng'):
# Make sure that the yrng is not just the default
if not np.all(self.yrng[subplot_index] == 0):
self.set_yrng(self.yrng[subplot_index], subplot_index)
else:
if ydata is None:
our_data = data.values
else:
our_data = ydata
if np.isfinite(our_data).any():
if invert_y_axis is False:
yrng = [np.nanmin(our_data), np.nanmax(our_data)]
else:
yrng = [np.nanmax(our_data), np.nanmin(our_data)]
else:
yrng = [0, 1]
self.set_yrng(yrng, subplot_index)
# Set X Format
if len(subplot_index) == 1:
days = self.xrng[subplot_index, 1] - self.xrng[subplot_index, 0]
else:
days = (
self.xrng[subplot_index[0], subplot_index[1], 1]
- self.xrng[subplot_index[0], subplot_index[1], 0]
)
# Put on an xlabel, but only if we are making the bottom-most plot
if subplot_index[0] == self.axes.shape[0] - 1:
self.axes[subplot_index].set_xlabel('Time [UTC]')
if ydata is not None:
if cbar_label is None:
self.add_colorbar(mesh, title=units, subplot_index=subplot_index)
else:
self.add_colorbar(mesh, title=cbar_label, subplot_index=subplot_index)
myFmt = common.get_date_format(days)
self.axes[subplot_index].xaxis.set_major_formatter(myFmt)
return self.axes[subplot_index]
def time_height_scatter(
self,
data_field=None,
dsname=None,
cmap='rainbow',
alt_label=None,
alt_field='alt',
cb_label=None,
**kwargs,
):
"""
Create a time series plot of altitude and data variable with
color also indicating value with a color bar. The Color bar is
positioned to serve both as the indicator of the color intensity
and the second y-axis.
Parameters
----------
data_field : str
Name of data field in the dataset to plot on second y-axis.
dsname : str or None
The name of the datastream to plot.
cmap : str
Colorbar color map to use.
alt_label : str
Altitude first y-axis label to use. If None, will try to use
long_name and units.
alt_field : str
Name of the altitude field in the dataset to plot on the first y-axis.
cb_label : str
Colorbar label to use. If not set will try to use
long_name and units.
**kwargs : keyword arguments
Any other keyword arguments that will be passed
into TimeSeriesDisplay.plot module when the figure
is made.
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
# Get data and dimensions
data = self._ds[dsname][data_field]
altitude = self._ds[dsname][alt_field]
dim = list(self._ds[dsname][data_field].dims)
xdata = self._ds[dsname][dim[0]]
if alt_label is None:
try:
alt_label = altitude.attrs['long_name'] + ''.join(
[' (', altitude.attrs['units'], ')']
)
except KeyError:
alt_label = alt_field
if cb_label is None:
try:
cb_label = data.attrs['long_name'] + ''.join([' (', data.attrs['units'], ')'])
except KeyError:
cb_label = data_field
colorbar_map = plt.cm.get_cmap(cmap)
self.fig.subplots_adjust(left=0.1, right=0.86, bottom=0.16, top=0.91)
ax1 = self.plot(alt_field, color='black', **kwargs)
ax1.set_ylabel(alt_label)
ax2 = ax1.twinx()
sc = ax2.scatter(xdata.values, data.values, c=data.values, marker='.', cmap=colorbar_map)
cbaxes = self.fig.add_axes(
[
self.fig.subplotpars.right + 0.02,
self.fig.subplotpars.bottom,
0.02,
self.fig.subplotpars.top - self.fig.subplotpars.bottom,
]
)
cbar = plt.colorbar(sc, cax=cbaxes)
ax2.set_ylim(cbar.mappable.get_clim())
cbar.ax.set_ylabel(cb_label)
ax2.set_yticklabels([])
return self.axes[0]
def qc_flag_block_plot(
self,
data_field=None,
dsname=None,
subplot_index=(0,),
time_rng=None,
assessment_color=None,
edgecolor='face',
set_shading='auto',
cb_friendly=False,
**kwargs,
):
"""
Create a time series plot of embedded quality control values
using broken barh plotting.
Parameters
----------
data_field : str
Name of data field in the dataset to plot corresponding quality
control.
dsname : None or str
If there is more than one datastream in the display object the
name of the datastream needs to be specified. If set to None and
there is only one datastream ACT will use the sole datastream
in the object.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
time_rng : tuple or list
List or tuple with (min, max) values to set the x-axis range limits.
assessment_color : dict
Dictionary lookup to override the default assessment-to-color mapping. Make
sure the assessment names match the case used in the dataset.
edgecolor : str or list
Color name, list of color names or 'face' as defined in matplotlib.axes.Axes.broken_barh
set_shading : string
Option to set the matplotlib.pcolormesh shading parameter.
Defaults to 'auto'.
cb_friendly : boolean
Set to true if you want to use the integrated colorblind friendly
colors for green/red based on the Homeyer colormap
**kwargs : keyword arguments
The keyword arguments for :func:`plt.broken_barh`.
"""
# Color to plot associated with assessment.
color_lookup = {
'Bad': 'red',
'Incorrect': 'red',
'Indeterminate': 'orange',
'Suspect': 'orange',
'Missing': 'darkgray',
'Not Failing': 'green',
'Acceptable': 'green',
}
if cb_friendly:
color_lookup['Bad'] = (0.9285714285714286, 0.7130901016453677, 0.7130901016453677)
color_lookup['Incorrect'] = (0.9285714285714286, 0.7130901016453677, 0.7130901016453677)
color_lookup['Not Failing'] = (0.0, 0.4240129715562796, 0.4240129715562796)
color_lookup['Acceptable'] = (0.0, 0.4240129715562796, 0.4240129715562796)
if assessment_color is not None:
for asses, color in assessment_color.items():
color_lookup[asses] = color
if asses == 'Incorrect':
color_lookup['Bad'] = color
if asses == 'Suspect':
color_lookup['Indeterminate'] = color
# Set up list of test names to use for missing values
missing_val_long_names = [
'Value equal to missing_value*',
'Value set to missing_value*',
'Value is equal to missing_value*',
'Value is set to missing_value*',
]
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
# Set up or get current plot figure
if self.fig is None:
self.fig = plt.figure()
# Set up or get current axes
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
ax = self.axes[subplot_index]
# Set X Limit - We want the same time axes for all subplots
data = self._ds[dsname][data_field]
dim = list(self._ds[dsname][data_field].dims)
xdata = self._ds[dsname][dim[0]]
# Get data and attributes
qc_data_field = self._ds[dsname].qcfilter.check_for_ancillary_qc(
data_field, add_if_missing=False, cleanup=False
)
if qc_data_field is None:
raise ValueError(f'No quality control ancillary variable in Dataset for {data_field}')
flag_masks = self._ds[dsname][qc_data_field].attrs['flag_masks']
flag_meanings = self._ds[dsname][qc_data_field].attrs['flag_meanings']
flag_assessments = self._ds[dsname][qc_data_field].attrs['flag_assessments']
# Get time ranges for green blocks
time_delta = determine_time_delta(xdata.values)
barh_list_green = reduce_time_ranges(xdata.values, time_delta=time_delta, broken_barh=True)
# Set background to gray indicating not available data
ax.set_facecolor('dimgray')
# Check if plotting 2D data vs 1D data. 2D data will be summarized by
# assessment category instead of showing each test.
data_shape = self._ds[dsname][qc_data_field].shape
if len(data_shape) > 1:
cur_assessments = list(set(flag_assessments))
cur_assessments.sort()
cur_assessments.reverse()
qc_data = np.full(data_shape, -1, dtype=np.int16)
plot_colors = []
tick_names = []
index = self._ds[dsname][qc_data_field].values == 0
if index.any():
qc_data[index] = 0
plot_colors.append(color_lookup['Not Failing'])
tick_names.append('Not Failing')
for ii, assess in enumerate(cur_assessments):
if assess not in color_lookup:
color_lookup[assess] = list(mplcolors.CSS4_COLORS.keys())[ii]
ii += 1
assess_data = self._ds[dsname].qcfilter.get_masked_data(
data_field, rm_assessments=assess
)
if assess_data.mask.any():
qc_data[assess_data.mask] = ii
plot_colors.append(color_lookup[assess])
tick_names.append(assess)
# Overwrite missing data. Not sure if we want to do this because VAPs set
# the value to missing but the test is set to Bad. This tries to overcome that
# by looking for correct test description that would only indicate the values
# are missing not that they are set to missing by a test... most likely.
missing_test_nums = []
for ii, flag_meaning in enumerate(flag_meanings):
# Check if the bit set is indicating missing data.
for val in missing_val_long_names:
if re_search(val, flag_meaning):
test_num = parse_bit(flag_masks[ii])[0]
missing_test_nums.append(test_num)
assess_data = self._ds[dsname].qcfilter.get_masked_data(
data_field, rm_tests=missing_test_nums
)
if assess_data.mask.any():
qc_data[assess_data.mask] = -1
plot_colors.append(color_lookup['Missing'])
tick_names.append('Missing')
# Create a masked array to allow not plotting where values are missing
qc_data = np.ma.masked_equal(qc_data, -1)
dims = self._ds[dsname][qc_data_field].dims
xvalues = self._ds[dsname][dims[0]].values
yvalues = self._ds[dsname][dims[1]].values
cMap = mplcolors.ListedColormap(plot_colors)
mesh = ax.pcolormesh(
xvalues,
yvalues,
np.transpose(qc_data),
cmap=cMap,
vmin=0,
shading=set_shading,
)
divider = make_axes_locatable(ax)
# Determine correct placement of words on colorbar
tick_nums = (
np.arange(0, len(tick_names) * 2 + 1) / (len(tick_names) * 2) * np.nanmax(qc_data)
)[1::2]
cax = divider.append_axes('bottom', size='5%', pad=0.3)
cbar = self.fig.colorbar(
mesh,
cax=cax,
orientation='horizontal',
spacing='uniform',
ticks=tick_nums,
shrink=0.5,
)
cbar.ax.set_xticklabels(tick_names)
# Set YTitle
dim_name = list(set(self._ds[dsname][qc_data_field].dims) - {'time'})
try:
ytitle = f"{dim_name[0]} ({self._ds[dsname][dim_name[0]].attrs['units']})"
ax.set_ylabel(ytitle)
except KeyError:
pass
# Add which tests were set as text to the plot
unique_values = []
for ii in np.unique(self._ds[dsname][qc_data_field].values):
unique_values.extend(parse_bit(ii))
if len(unique_values) > 0:
unique_values = list(set(unique_values))
unique_values.sort()
unique_values = [str(ii) for ii in unique_values]
self.fig.text(
0.5,
-0.35,
f"QC Tests Tripped: {', '.join(unique_values)}",
transform=ax.transAxes,
horizontalalignment='center',
verticalalignment='center',
fontweight='bold',
)
else:
test_nums = []
for ii, assess in enumerate(flag_assessments):
if assess not in color_lookup:
color_lookup[assess] = list(mplcolors.CSS4_COLORS.keys())[ii]
# Plot green data first.
ax.broken_barh(
barh_list_green,
(ii, ii + 1),
facecolors=color_lookup['Not Failing'],
edgecolor=edgecolor,
**kwargs,
)
# Get test number from flag_mask bitpacked number
test_nums.append(parse_bit(flag_masks[ii]))
# Get masked array data to use mask for finding if/where test is set
data = self._ds[dsname].qcfilter.get_masked_data(
data_field, rm_tests=test_nums[-1]
)
if np.any(data.mask):
# Get time ranges from time and masked data
barh_list = reduce_time_ranges(
xdata.values[data.mask], time_delta=time_delta, broken_barh=True
)
# Check if the bit set is indicating missing data. If so change
# to different plotting color than what is in flag_assessments.
for val in missing_val_long_names:
if re_search(val, flag_meanings[ii]):
assess = 'Missing'
break
# Lay down blocks of tripped tests using correct color
ax.broken_barh(
barh_list,
(ii, ii + 1),
facecolors=color_lookup[assess],
edgecolor=edgecolor,
**kwargs,
)
# Add test description to plot.
ax.text(xdata.values[0], ii + 0.5, ' ' + flag_meanings[ii], va='center')
# Change y ticks to test number
plt.yticks(
[ii + 0.5 for ii in range(0, len(test_nums))],
labels=['Test ' + str(ii[0]) for ii in test_nums],
)
# Set ylimit to number of tests plotted
ax.set_ylim(0, len(flag_assessments))
# Set X Limit - We want the same time axes for all subplots
if not hasattr(self, 'time_rng'):
if time_rng is not None:
self.time_rng = list(time_rng)
else:
self.time_rng = [xdata.min().values, xdata.max().values]
self.set_xrng(self.time_rng, subplot_index)
# Get X format - We want the same time axes for all subplots
if hasattr(self, 'time_fmt'):
ax.xaxis.set_major_formatter(self.time_fmt)
else:
# Set X Format
if len(subplot_index) == 1:
days = self.xrng[subplot_index, 1] - self.xrng[subplot_index, 0]
else:
days = (
self.xrng[subplot_index[0], subplot_index[1], 1]
- self.xrng[subplot_index[0], subplot_index[1], 0]
)
myFmt = common.get_date_format(days)
ax.xaxis.set_major_formatter(myFmt)
self.time_fmt = myFmt
return self.axes[subplot_index]
def fill_between(
self,
field,
dsname=None,
subplot_index=(0,),
set_title=None,
secondary_y=False,
**kwargs,
):
"""
Makes a fill_between plot, based on matplotlib
Parameters
----------
field : str
The name of the field to plot.
dsname : None or str
If there is more than one datastream in the display object the
name of the datastream needs to be specified. If set to None and
there is only one datastream ACT will use the sole datastream
in the object.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
set_title : str
The title for the plot.
secondary_y : boolean
Option to indicate if the data should be plotted on second y-axis.
**kwargs : keyword arguments
The keyword arguments for :func:`plt.plot` (1D timeseries) or
:func:`plt.pcolormesh` (2D timeseries).
Returns
-------
ax : matplotlib axis handle
The matplotlib axis handle of the plot.
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
# Get data and dimensions
data = self._ds[dsname][field]
dim = list(self._ds[dsname][field].dims)
xdata = self._ds[dsname][dim[0]]
if 'units' in data.attrs:
ytitle = ''.join(['(', data.attrs['units'], ')'])
else:
ytitle = field
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
# Set ax to appropriate axis
if secondary_y is False:
ax = self.axes[subplot_index]
else:
ax = self.axes[subplot_index].twinx()
ax.fill_between(xdata.values, data, **kwargs)
# Set X Format
if len(subplot_index) == 1:
days = self.xrng[subplot_index, 1] - self.xrng[subplot_index, 0]
else:
days = (
self.xrng[subplot_index[0], subplot_index[1], 1]
- self.xrng[subplot_index[0], subplot_index[1], 0]
)
myFmt = common.get_date_format(days)
ax.xaxis.set_major_formatter(myFmt)
# Set X format - We want the same time axes for all subplots
if not hasattr(self, 'time_fmt'):
self.time_fmt = myFmt
# Put on an xlabel, but only if we are making the bottom-most plot
if subplot_index[0] == self.axes.shape[0] - 1:
self.axes[subplot_index].set_xlabel('Time [UTC]')
# Set YTitle
ax.set_ylabel(ytitle)
# Set Title
if set_title is None:
set_title = ' '.join(
[
dsname,
field,
'on',
dt_utils.numpy_to_arm_date(self._ds[dsname].time.values[0]),
]
)
if secondary_y is False:
ax.set_title(set_title)
return self.axes[subplot_index]
<|code_end|>
|
Add option to include required global attributes when writing to netCDF
Some people are planning to use ACT to generate official datastreams and need to have required global attributes set. We should help them add the correct attributes with correct values.
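A rough sketch of the kind of helper this could become (the function name and the attribute names below are placeholders, not ACT's actual API):

```python
import numpy as np
import xarray as xr


def write_with_required_attrs(ds, path, required_attrs):
    """Hypothetical helper: fill in any missing required global attributes
    with caller-supplied defaults before writing to netCDF."""
    for name, value in required_attrs.items():
        # Keep attributes the user already set; only add the missing ones.
        ds.attrs.setdefault(name, value)
    ds.to_netcdf(path)


ds = xr.Dataset({'temp_mean': ('time', np.arange(5.0))})
write_with_required_attrs(
    ds,
    'example.nc',
    {'doi': 'unset', 'institution': 'unset', 'datastream': 'unset'},
)
```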
|
act/utils/inst_utils.py
<|code_start|>"""
Functions containing utilities for instruments.
"""
def decode_present_weather(ds, variable=None, decoded_name=None):
"""
This function decodes codes reported from automatic weather stations such as the PWD22.
This is based on WMO Table 4680.
Parameters
----------
ds : xarray.Dataset
ACT or Xarray dataset from which to convert codes
variable : string
Variable to decode
decoded_name : string
New variable name to store updated labels
Returns
-------
ds : xarray.Dataset
Returns dataset with new decoded data
References
----------
WMO Manual on Code Volume I.1
https://www.wmo.int/pages/prog/www/WMOCodes/WMO306_vI1/Publications/2017update/Sel9.pdf
"""
# Check to ensure that a variable name is passed
if variable is None:
raise ValueError('You must specify a variable')
if variable not in ds:
raise ValueError('Variable not in the dataset')
# Define the weather hash
weather = {
0: 'No significant weather observed',
1: 'Clouds generally dissolving or becoming less developed during the past hour',
2: 'State of the sky on the whole unchanged during the past hour',
3: 'Clouds generally forming or developing during the past hour',
4: 'Haze or smoke, or dust in suspension in the air, visibility >= 1 km',
5: 'Haze or smoke, or dust in suspension in the air, visibility < 1 km',
10: 'Mist',
11: 'Diamond dust',
12: 'Distant lightning',
18: 'Squalls',
20: 'Fog',
21: 'Precipitation',
22: 'Drizzle (not freezing) or snow grains',
23: 'Rain (not freezing)',
24: 'Snow',
25: 'Freezing drizzle or freezing rain',
26: 'Thunderstorm (with or without precipitation)',
27: 'Blowing or drifting snow or sand',
28: 'Blowing or drifting snow or sand, visibility >= 1 km',
29: 'Blowing or drifting snow or sand, visibility < 1 km',
30: 'Fog',
31: 'Fog or ice fog in patches',
32: 'Fog or ice fog, has become thinner during the past hour',
33: 'Fog or ice fog, no appreciable change during the past hour',
34: 'Fog or ice fog, has begun or become thicker during the past hour',
35: 'Fog, depositing rime',
40: 'Precipitation',
41: 'Precipitation, slight or moderate',
42: 'Precipitation, heavy',
43: 'Liquid precipitation, slight or moderate',
44: 'Liquid precipitation, heavy',
45: 'Solid precipitation, slight or moderate',
46: 'Solid precipitation, heavy',
47: 'Freezing precipitation, slight or moderate',
48: 'Freezing precipitation, heavy',
50: 'Drizzle',
51: 'Drizzle, not freezing, slight',
52: 'Drizzle, not freezing, moderate',
53: 'Drizzle, not freezing, heavy',
54: 'Drizzle, freezing, slight',
55: 'Drizzle, freezing, moderate',
56: 'Drizzle, freezing, heavy',
57: 'Drizzle and rain, slight',
58: 'Drizzle and rain, moderate or heavy',
60: 'Rain',
61: 'Rain, not freezing, slight',
62: 'Rain, not freezing, moderate',
63: 'Rain, not freezing, heavy',
64: 'Rain, freezing, slight',
65: 'Rain, freezing, moderate',
66: 'Rain, freezing, heavy',
67: 'Rain (or drizzle) and snow, slight',
68: 'Rain (or drizzle) and snow, moderate or heavy',
70: 'Snow',
71: 'Snow, light',
72: 'Snow, moderate',
73: 'Snow, heavy',
74: 'Ice pellets, slight',
75: 'Ice pellets, moderate',
76: 'Ice pellets, heavy',
77: 'Snow grains',
78: 'Ice crystals',
80: 'Shower(s) or Intermittent Precipitation',
81: 'Rain shower(s) or intermittent rain, slight',
82: 'Rain shower(s) or intermittent rain, moderate',
83: 'Rain shower(s) or intermittent rain, heavy',
84: 'Rain shower(s) or intermittent rain, violent',
85: 'Snow shower(s) or intermittent snow, slight',
86: 'Snow shower(s) or intermittent snow, moderate',
87: 'Snow shower(s) or intermittent snow, heavy',
89: 'Hail',
90: 'Thunderstorm',
91: 'Thunderstorm, slight or moderate, with no precipitation',
92: 'Thunderstorm, slight or moderate, with rain showers and/or snow showers',
93: 'Thunderstorm, slight or moderate, with hail',
94: 'Thunderstorm, heavy, with no precipitation',
95: 'Thunderstorm, heavy, with rain showers and/or snow showers',
96: 'Thunderstorm, heavy, with hail',
99: 'Tornado',
-9999: 'Missing',
}
# If a decoded name is not passed, make one
if decoded_name is None:
decoded_name = variable + '_decoded'
# Get data and fill nans with -9999
data = ds[variable]
data = data.fillna(-9999)
# Get the weather type for each code
wx_type = [weather[d] for d in data.values]
# Massage the data array to set back in the dataset
data.values = wx_type
if 'long_name' in data.attrs:
data.attrs['long_name'] = data.attrs['long_name'] + ' Decoded'
else:
data.attrs['long_name'] = 'Decoded present weather values'
if 'valid_min' in data.attrs:
del data.attrs['valid_min']
if 'valid_max' in data.attrs:
del data.attrs['valid_max']
ds[decoded_name] = data
return ds
<|code_end|>
examples/io/plot_create_arm_ds.py
<|code_start|><|code_end|>
|
act/utils/inst_utils.py
<|code_start|>"""
Functions containing utilities for instruments.
"""
def decode_present_weather(ds, variable=None, decoded_name=None):
"""
This function decodes codes reported from automatic weather stations such as the PWD22.
This is based on WMO Table 4680.
Parameters
----------
ds : xarray.Dataset
ACT or Xarray dataset from which to convert codes
variable : string
Variable to decode
decoded_name : string
New variable name to store updated labels
Returns
-------
ds : xarray.Dataset
Returns dataset with new decoded data
References
----------
WMO Manual on Code Volume I.1
https://www.wmo.int/pages/prog/www/WMOCodes/WMO306_vI1/Publications/2017update/Sel9.pdf
"""
# Check to ensure that a variable name is passed
if variable is None:
raise ValueError('You must specify a variable')
if variable not in ds:
raise ValueError('Variable not in the dataset')
# Define the weather hash
weather = {
0: 'No significant weather observed',
1: 'Clouds generally dissolving or becoming less developed during the past hour',
2: 'State of the sky on the whole unchanged during the past hour',
3: 'Clouds generally forming or developing during the past hour',
4: 'Haze or smoke, or dust in suspension in the air, visibility >= 1 km',
5: 'Haze or smoke, or dust in suspension in the air, visibility < 1 km',
10: 'Mist',
11: 'Diamond dust',
12: 'Distant lightning',
18: 'Squalls',
20: 'Fog',
21: 'Precipitation',
22: 'Drizzle (not freezing) or snow grains',
23: 'Rain (not freezing)',
24: 'Snow',
25: 'Freezing drizzle or freezing rain',
26: 'Thunderstorm (with or without precipitation)',
27: 'Blowing or drifting snow or sand',
28: 'Blowing or drifting snow or sand, visibility >= 1 km',
29: 'Blowing or drifting snow or sand, visibility < 1 km',
30: 'Fog',
31: 'Fog or ice fog in patches',
32: 'Fog or ice fog, has become thinner during the past hour',
33: 'Fog or ice fog, no appreciable change during the past hour',
34: 'Fog or ice fog, has begun or become thicker during the past hour',
35: 'Fog, depositing rime',
40: 'Precipitation',
41: 'Precipitation, slight or moderate',
42: 'Precipitation, heavy',
43: 'Liquid precipitation, slight or moderate',
44: 'Liquid precipitation, heavy',
45: 'Solid precipitation, slight or moderate',
46: 'Solid precipitation, heavy',
47: 'Freezing precipitation, slight or moderate',
48: 'Freezing precipitation, heavy',
50: 'Drizzle',
51: 'Drizzle, not freezing, slight',
52: 'Drizzle, not freezing, moderate',
53: 'Drizzle, not freezing, heavy',
54: 'Drizzle, freezing, slight',
55: 'Drizzle, freezing, moderate',
56: 'Drizzle, freezing, heavy',
57: 'Drizzle and rain, slight',
58: 'Drizzle and rain, moderate or heavy',
60: 'Rain',
61: 'Rain, not freezing, slight',
62: 'Rain, not freezing, moderate',
63: 'Rain, not freezing, heavy',
64: 'Rain, freezing, slight',
65: 'Rain, freezing, moderate',
66: 'Rain, freezing, heavy',
67: 'Rain (or drizzle) and snow, slight',
68: 'Rain (or drizzle) and snow, moderate or heavy',
70: 'Snow',
71: 'Snow, light',
72: 'Snow, moderate',
73: 'Snow, heavy',
74: 'Ice pellets, slight',
75: 'Ice pellets, moderate',
76: 'Ice pellets, heavy',
77: 'Snow grains',
78: 'Ice crystals',
80: 'Shower(s) or Intermittent Precipitation',
81: 'Rain shower(s) or intermittent rain, slight',
82: 'Rain shower(s) or intermittent rain, moderate',
83: 'Rain shower(s) or intermittent rain, heavy',
84: 'Rain shower(s) or intermittent rain, violent',
85: 'Snow shower(s) or intermittent snow, slight',
86: 'Snow shower(s) or intermittent snow, moderate',
87: 'Snow shower(s) or intermittent snow, heavy',
89: 'Hail',
90: 'Thunderstorm',
91: 'Thunderstorm, slight or moderate, with no precipitation',
92: 'Thunderstorm, slight or moderate, with rain showers and/or snow showers',
93: 'Thunderstorm, slight or moderate, with hail',
94: 'Thunderstorm, heavy, with no precipitation',
95: 'Thunderstorm, heavy, with rain showers and/or snow showers',
96: 'Thunderstorm, heavy, with hail',
99: 'Tornado',
-9999: 'Missing',
}
# If a decoded name is not passed, make one
if decoded_name is None:
decoded_name = variable + '_decoded'
# Get data and fill nans with -9999
data = ds[variable]
data = data.fillna(-9999)
# Get the weather type for each code
wx_type = [weather[d] for d in data.values]
# Massage the data array to set back in the dataset
data.values = wx_type
if 'long_name' in data.attrs:
data.attrs['long_name'] = data.attrs['long_name'] + ' Decoded'
else:
data.attrs['long_name'] = 'Decoded present weather values'
if 'valid_min' in data.attrs:
del data.attrs['valid_min']
if 'valid_max' in data.attrs:
del data.attrs['valid_max']
ds[decoded_name] = data
return ds
<|code_end|>
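A minimal usage sketch for the new decoder (the file path and variable name below are hypothetical; any dataset with a WMO 4680 present weather code variable should work):
```
import act
from act.utils.inst_utils import decode_present_weather

# Hypothetical file and variable names, for illustration only
ds = act.io.armfiles.read_netcdf('sgpmetE13.b1.20210101.000000.cdf')
ds = decode_present_weather(ds, variable='pwd_pw_code_inst', decoded_name='pwd_pw_decoded')
print(ds['pwd_pw_decoded'].values[0:5])
```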
examples/io/plot_create_arm_ds.py
<|code_start|>"""
Create a dataset to mimic ARM file formats
------------------------------------------
Example shows how to create a dataset from an ARM DOD.
This will enable users to create files that mimic ARM
files, making for easier use across the community.
Author: Adam Theisen
"""
import act
# Create an empty dataset using an ARM DOD
ds = act.io.armfiles.create_ds_from_arm_dod('vdis.b1', {'time': 1440}, scalar_fill_dim='time')
# Print out the xarray dataset to see that it's empty
print(ds)
# The user could populate this in a number of ways,
# and how best to do so is left up to the user.
# If one has an existing dataset, a mapping of variable
# names is sometimes the easiest way
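# As a hedged illustration only (the file and variable names below are
# hypothetical), such a name mapping might look like:
# ds_existing = act.io.armfiles.read_netcdf('existing_vdis_file.nc')
# name_map = {'num_drops': 'number_of_drops'}
# for dod_name, existing_name in name_map.items():
#     ds[dod_name].values = ds_existing[existing_name].values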
# Let's look at some variable attributes
# These can be updated and it would be up to the
# user to ensure these tests are being applied
# and are appropriately set in the corresponding QC variable
print(ds['num_drops'].attrs)
# Next, let's print out the global attributes
print(ds.attrs)
# Add additional attributes or append to existing
# if they are needed using a dictionary
atts = {
'command_line': 'python plot_create_arm_ds.py',
'process_version': '1.2.3',
'history': 'Processed with Jupyter Workbench',
'random': '1234253sdgfadf'
}
for a in atts:
if a in ds.attrs:
ds.attrs[a] += atts[a]
else:
ds.attrs[a] = atts[a]
# Print out the attribute
print(a, ds.attrs[a])
# Write data out to netcdf
ds.to_netcdf('./sgpvdisX1.b1.20230101.000000.nc')
# If one wants to clean up the dataset to better match CF standards
# the following can be done as well
ds.write.write_netcdf(cf_compliant=True, path='./sgpvdisX1.b1.20230101.000000.cf')
<|code_end|>
|
Make SkewT Display More Flexible with Matplotlib
While taking a look at #606, I noticed that there is currently no way to feed a user-defined figure or axes into the SkewT Display. This would be an important feature to add, enabling users to customize their plots further and to interact with other displays.
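A hedged sketch of the kind of usage this would enable (the keyword names `set_fig` and `subplot` below are placeholders for whatever the final API ends up being, not an existing interface):
```
import matplotlib.pyplot as plt
import act

sonde_ds = act.io.armfiles.read_netcdf(act.tests.sample_files.EXAMPLE_SONDE1)

# User-created figure with room for a Skew-T panel and another display
fig = plt.figure(figsize=(12, 6))

# Hypothetical keywords: hand an existing figure and a subplot spec to the display
skewt = act.plotting.SkewTDisplay(sonde_ds, set_fig=fig, subplot=(1, 2, 1))
skewt.plot_from_u_and_v('u_wind', 'v_wind', 'pres', 'tdry', 'dp')
plt.show()
```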
|
act/plotting/skewtdisplay.py
<|code_start|>"""
Stores the class for SkewTDisplay.
"""
import warnings
# Import third party libraries
import matplotlib.pyplot as plt
import metpy
import metpy.calc as mpcalc
from metpy.plots import SkewT
from metpy.units import units
import numpy as np
import scipy
from copy import deepcopy
# Import Local Libs
from ..utils import datetime_utils as dt_utils
from .plot import Display
class SkewTDisplay(Display):
"""
A class for making Skew-T plots.
This is inherited from the :func:`act.plotting.Display`
    class and therefore has the same attributes as that class.
See :func:`act.plotting.Display`
for more information. There are no additional attributes or parameters
to this class.
In order to create Skew-T plots, ACT needs the MetPy package to be
installed on your system. More information about
    MetPy can be found here: https://unidata.github.io/MetPy/latest/index.html.
Examples
--------
Here is an example of how to make a Skew-T plot using ACT:
    .. code-block:: python
sonde_ds = act.io.armfiles.read_netcdf(
act.tests.sample_files.EXAMPLE_SONDE1)
skewt = act.plotting.SkewTDisplay(sonde_ds)
skewt.plot_from_u_and_v('u_wind', 'v_wind', 'pres', 'tdry', 'dp')
plt.show()
"""
def __init__(self, ds, subplot_shape=(1,), ds_name=None, **kwargs):
# We want to use our routine to handle subplot adding, not the main
# one
new_kwargs = kwargs.copy()
super().__init__(ds, None, ds_name, subplot_kw=dict(projection='skewx'), **new_kwargs)
# Make a SkewT object for each subplot
self.add_subplots(subplot_shape, **kwargs)
def add_subplots(self, subplot_shape=(1,), **kwargs):
"""
Adds subplots to the Display object. The current
figure in the object will be deleted and overwritten.
Parameters
----------
subplot_shape : 1 or 2D tuple, list, or array
The structure of the subplots in (rows, cols).
subplot_kw : dict, optional
The kwargs to pass into fig.subplots.
**kwargs : keyword arguments
Any other keyword arguments that will be passed
into :func:`matplotlib.pyplot.figure` when the figure
is made. The figure is only made if the *fig*
property is None. See the matplotlib
documentation for further details on what keyword
arguments are available.
"""
del self.axes
if self.fig is None:
self.fig = plt.figure(**kwargs)
self.SkewT = np.empty(shape=subplot_shape, dtype=SkewT)
self.axes = np.empty(shape=subplot_shape, dtype=plt.Axes)
if len(subplot_shape) == 1:
for i in range(subplot_shape[0]):
subplot_tuple = (subplot_shape[0], 1, i + 1)
self.SkewT[i] = SkewT(fig=self.fig, subplot=subplot_tuple)
self.axes[i] = self.SkewT[i].ax
elif len(subplot_shape) == 2:
for i in range(subplot_shape[0]):
for j in range(subplot_shape[1]):
subplot_tuple = (
subplot_shape[0],
subplot_shape[1],
i * subplot_shape[1] + j + 1,
)
self.SkewT[i, j] = SkewT(fig=self.fig, subplot=subplot_tuple)
self.axes[i, j] = self.SkewT[i, j].ax
else:
raise ValueError('Subplot shape must be 1 or 2D!')
def set_xrng(self, xrng, subplot_index=(0,)):
"""
Sets the x range of the plot.
Parameters
----------
xrng : 2 number array.
The x limits of the plot.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
"""
if self.axes is None:
raise RuntimeError('set_xrng requires the plot to be displayed.')
if not hasattr(self, 'xrng') or np.all(self.xrng == 0):
if len(self.axes.shape) == 2:
self.xrng = np.zeros((self.axes.shape[0], self.axes.shape[1], 2))
else:
self.xrng = np.zeros((self.axes.shape[0], 2))
self.axes[subplot_index].set_xlim(xrng)
self.xrng[subplot_index, :] = np.array(xrng)
def set_yrng(self, yrng, subplot_index=(0,)):
"""
Sets the y range of the plot.
Parameters
----------
yrng : 2 number array
The y limits of the plot.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
"""
if self.axes is None:
raise RuntimeError('set_yrng requires the plot to be displayed.')
if not hasattr(self, 'yrng') or np.all(self.yrng == 0):
if len(self.axes.shape) == 2:
self.yrng = np.zeros((self.axes.shape[0], self.axes.shape[1], 2))
else:
self.yrng = np.zeros((self.axes.shape[0], 2))
if not hasattr(self, 'yrng') and len(self.axes.shape) == 2:
self.yrng = np.zeros((self.axes.shape[0], self.axes.shape[1], 2))
elif not hasattr(self, 'yrng') and len(self.axes.shape) == 1:
self.yrng = np.zeros((self.axes.shape[0], 2))
if yrng[0] == yrng[1]:
yrng[1] = yrng[1] + 1
self.axes[subplot_index].set_ylim(yrng)
self.yrng[subplot_index, :] = yrng
def plot_from_spd_and_dir(
self, spd_field, dir_field, p_field, t_field, td_field, dsname=None, **kwargs
):
"""
This plot will make a sounding plot from wind data that is given
in speed and direction.
Parameters
----------
spd_field : str
The name of the field corresponding to the wind speed.
dir_field : str
The name of the field corresponding to the wind direction
in degrees from North.
p_field : str
The name of the field containing the atmospheric pressure.
t_field : str
The name of the field containing the atmospheric temperature.
td_field : str
The name of the field containing the dewpoint.
dsname : str or None
The name of the datastream to plot. Set to None to make ACT
attempt to automatically determine this.
kwargs : dict
Additional keyword arguments will be passed into
:func:`act.plotting.SkewTDisplay.plot_from_u_and_v`
Returns
-------
ax : matplotlib axis handle
The matplotlib axis handle corresponding to the plot.
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
# Make temporary field called tempu, tempv
spd = self._ds[dsname][spd_field].values
dir = self._ds[dsname][dir_field].values
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=RuntimeWarning)
tempu = -np.sin(np.deg2rad(dir)) * spd
tempv = -np.cos(np.deg2rad(dir)) * spd
self._ds[dsname]['temp_u'] = deepcopy(self._ds[dsname][spd_field])
self._ds[dsname]['temp_v'] = deepcopy(self._ds[dsname][spd_field])
self._ds[dsname]['temp_u'].values = tempu
self._ds[dsname]['temp_v'].values = tempv
the_ax = self.plot_from_u_and_v(
'temp_u', 'temp_v', p_field, t_field, td_field, dsname, **kwargs
)
del self._ds[dsname]['temp_u'], self._ds[dsname]['temp_v']
return the_ax
def plot_from_u_and_v(
self,
u_field,
v_field,
p_field,
t_field,
td_field,
dsname=None,
subplot_index=(0,),
p_levels_to_plot=None,
show_parcel=True,
shade_cape=True,
shade_cin=True,
set_title=None,
smooth_p=3,
plot_barbs_kwargs=dict(),
plot_kwargs=dict(),
):
"""
This function will plot a Skew-T from a sounding dataset. The wind
data must be given in u and v.
Parameters
----------
u_field : str
The name of the field containing the u component of the wind.
v_field : str
The name of the field containing the v component of the wind.
p_field : str
The name of the field containing the pressure.
t_field : str
The name of the field containing the temperature.
td_field : str
The name of the field containing the dewpoint temperature.
dsname : str or None
The name of the datastream to plot. Set to None to make ACT
attempt to automatically determine this.
subplot_index : tuple
The index of the subplot to make the plot on.
p_levels_to_plot : 1D array
The pressure levels to plot the wind barbs on. Set to None
            to have ACT use neatly spaced defaults of
50, 100, 200, 300, 400, 500, 600, 700, 750, 800,
850, 900, 950, and 1000 hPa.
show_parcel : bool
Set to True to show the temperature of a parcel lifted
from the surface.
shade_cape : bool
Set to True to shade the CAPE red.
shade_cin : bool
Set to True to shade the CIN blue.
set_title : None or str
The title of the plot is set to this. Set to None to use
a default title.
smooth_p : int
If pressure is not in descending order, will smooth the data
using this many points to try and work around the issue.
            Default is 3, but in the PBL retrieval code we have to default to 5 at times.
plot_barbs_kwargs : dict
Additional keyword arguments to pass into MetPy's
SkewT.plot_barbs.
plot_kwargs : dict
Additional keyword arguments to pass into MetPy's
SkewT.plot.
Returns
-------
ax : matplotlib axis handle
The axis handle to the plot.
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
if p_levels_to_plot is None:
p_levels_to_plot = np.array(
[
50.0,
100.0,
200.0,
300.0,
400.0,
500.0,
600.0,
700.0,
750.0,
800.0,
850.0,
900.0,
950.0,
1000.0,
]
) * units('hPa')
# Get pressure and smooth if not in order
p = self._ds[dsname][p_field]
if not all(p[i] <= p[i + 1] for i in range(len(p) - 1)):
self._ds[dsname][p_field] = (
self._ds[dsname][p_field].rolling(time=smooth_p, min_periods=1, center=True).mean()
)
p = self._ds[dsname][p_field]
p_units = self._ds[dsname][p_field].attrs['units']
p = p.values * getattr(units, p_units)
T = self._ds[dsname][t_field]
T_units = self._ds[dsname][t_field].attrs['units']
if T_units == 'C':
T_units = 'degC'
T = T.values * getattr(units, T_units)
Td = self._ds[dsname][td_field]
Td_units = self._ds[dsname][td_field].attrs['units']
if Td_units == 'C':
Td_units = 'degC'
Td = Td.values * getattr(units, Td_units)
u = self._ds[dsname][u_field]
u_units = self._ds[dsname][u_field].attrs['units']
u = u.values * getattr(units, u_units)
v = self._ds[dsname][v_field]
v_units = self._ds[dsname][v_field].attrs['units']
v = v.values * getattr(units, v_units)
u_red = np.zeros_like(p_levels_to_plot) * getattr(units, u_units)
v_red = np.zeros_like(p_levels_to_plot) * getattr(units, v_units)
# Check p_levels_to_plot units, and convert to p units if needed
if not hasattr(p_levels_to_plot, 'units'):
p_levels_to_plot = p_levels_to_plot * getattr(units, p_units)
else:
p_levels_to_plot = p_levels_to_plot.to(p_units)
for i in range(len(p_levels_to_plot)):
index = np.argmin(np.abs(p_levels_to_plot[i] - p))
u_red[i] = u[index].magnitude * getattr(units, u_units)
v_red[i] = v[index].magnitude * getattr(units, v_units)
self.SkewT[subplot_index].plot(p, T, 'r', **plot_kwargs)
self.SkewT[subplot_index].plot(p, Td, 'g', **plot_kwargs)
self.SkewT[subplot_index].plot_barbs(
p_levels_to_plot.magnitude, u_red, v_red, **plot_barbs_kwargs
)
# Metpy fix if Pressure does not decrease monotonically in
# your sounding.
try:
prof = mpcalc.parcel_profile(p, T[0], Td[0]).to('degC')
except metpy.calc.exceptions.InvalidSoundingError:
p = scipy.ndimage.median_filter(p, 3, output=float)
p = metpy.units.units.Quantity(p, p_units)
prof = mpcalc.parcel_profile(p, T[0], Td[0]).to('degC')
if show_parcel:
# Only plot where prof > T
lcl_pressure, lcl_temperature = mpcalc.lcl(p[0], T[0], Td[0])
self.SkewT[subplot_index].plot(
lcl_pressure, lcl_temperature, 'ko', markerfacecolor='black', **plot_kwargs
)
self.SkewT[subplot_index].plot(p, prof, 'k', linewidth=2, **plot_kwargs)
if shade_cape:
self.SkewT[subplot_index].shade_cape(p, T, prof, linewidth=2)
if shade_cin:
self.SkewT[subplot_index].shade_cin(p, T, prof, linewidth=2)
# Set Title
if set_title is None:
set_title = ' '.join(
[
dsname,
'on',
dt_utils.numpy_to_arm_date(self._ds[dsname].time.values[0]),
]
)
self.axes[subplot_index].set_title(set_title)
# Set Y Limit
our_data = p.magnitude
if np.isfinite(our_data).any():
yrng = [np.nanmax(our_data), np.nanmin(our_data)]
else:
yrng = [1000.0, 100.0]
self.set_yrng(yrng, subplot_index)
# Set X Limit
xrng = [np.nanmin(T.magnitude) - 10.0, np.nanmax(T.magnitude) + 10.0]
self.set_xrng(xrng, subplot_index)
return self.axes[subplot_index]
<|code_end|>
examples/plotting/plot_enhanced_skewt.py
<|code_start|><|code_end|>
|
act/plotting/skewtdisplay.py
<|code_start|>"""
Stores the class for SkewTDisplay.
"""
import warnings
# Import third party libraries
import metpy
import metpy.calc as mpcalc
from metpy.plots import SkewT, Hodograph
from metpy.units import units
import matplotlib.pyplot as plt
import numpy as np
import scipy
from copy import deepcopy
# Import Local Libs
from ..utils import datetime_utils as dt_utils
from .plot import Display
from ..retrievals import calculate_stability_indicies
class SkewTDisplay(Display):
"""
A class for making Skew-T plots.
This is inherited from the :func:`act.plotting.Display`
    class and therefore has the same attributes as that class.
See :func:`act.plotting.Display`
for more information. There are no additional attributes or parameters
to this class.
In order to create Skew-T plots, ACT needs the MetPy package to be
installed on your system. More information about
    MetPy can be found here: https://unidata.github.io/MetPy/latest/index.html.
Examples
--------
Here is an example of how to make a Skew-T plot using ACT:
    .. code-block:: python
sonde_ds = act.io.armfiles.read_netcdf(
act.tests.sample_files.EXAMPLE_SONDE1)
skewt = act.plotting.SkewTDisplay(sonde_ds)
skewt.plot_from_u_and_v('u_wind', 'v_wind', 'pres', 'tdry', 'dp')
plt.show()
"""
def __init__(self, ds, subplot_shape=(1,), subplot=None, ds_name=None, set_fig=None, **kwargs):
# We want to use our routine to handle subplot adding, not the main
# one
new_kwargs = kwargs.copy()
super().__init__(ds, None, ds_name, subplot_kw=dict(projection='skewx'), **new_kwargs)
# Make a SkewT object for each subplot
self.add_subplots(subplot_shape, set_fig=set_fig, subplot=subplot, **kwargs)
def add_subplots(self, subplot_shape=(1,), set_fig=None, subplot=None, **kwargs):
"""
Adds subplots to the Display object. The current
figure in the object will be deleted and overwritten.
Parameters
----------
subplot_shape : 1 or 2D tuple, list, or array
The structure of the subplots in (rows, cols).
subplot_kw : dict, optional
The kwargs to pass into fig.subplots.
set_fig : matplotlib figure, optional
Figure to pass to SkewT
**kwargs : keyword arguments
Any other keyword arguments that will be passed
into :func:`matplotlib.pyplot.figure` when the figure
is made. The figure is only made if the *fig*
property is None. See the matplotlib
documentation for further details on what keyword
arguments are available.
"""
del self.axes
if self.fig is None and set_fig is None:
self.fig = plt.figure(**kwargs)
if set_fig is not None:
self.fig = set_fig
self.SkewT = np.empty(shape=subplot_shape, dtype=SkewT)
self.axes = np.empty(shape=subplot_shape, dtype=plt.Axes)
if len(subplot_shape) == 1:
for i in range(subplot_shape[0]):
if subplot is None:
subplot_tuple = (subplot_shape[0], 1, i + 1)
else:
subplot_tuple = subplot
self.SkewT[i] = SkewT(fig=self.fig, subplot=subplot_tuple)
self.axes[i] = self.SkewT[i].ax
elif len(subplot_shape) == 2:
for i in range(subplot_shape[0]):
for j in range(subplot_shape[1]):
subplot_tuple = (
subplot_shape[0],
subplot_shape[1],
i * subplot_shape[1] + j + 1,
)
self.SkewT[i, j] = SkewT(fig=self.fig, subplot=subplot_tuple)
self.axes[i, j] = self.SkewT[i, j].ax
else:
raise ValueError('Subplot shape must be 1 or 2D!')
def set_xrng(self, xrng, subplot_index=(0,)):
"""
Sets the x range of the plot.
Parameters
----------
xrng : 2 number array.
The x limits of the plot.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
"""
if self.axes is None:
raise RuntimeError('set_xrng requires the plot to be displayed.')
if not hasattr(self, 'xrng') or np.all(self.xrng == 0):
if len(self.axes.shape) == 2:
self.xrng = np.zeros((self.axes.shape[0], self.axes.shape[1], 2))
else:
self.xrng = np.zeros((self.axes.shape[0], 2))
self.axes[subplot_index].set_xlim(xrng)
self.xrng[subplot_index, :] = np.array(xrng)
def set_yrng(self, yrng, subplot_index=(0,)):
"""
Sets the y range of the plot.
Parameters
----------
yrng : 2 number array
The y limits of the plot.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
"""
if self.axes is None:
raise RuntimeError('set_yrng requires the plot to be displayed.')
if not hasattr(self, 'yrng') or np.all(self.yrng == 0):
if len(self.axes.shape) == 2:
self.yrng = np.zeros((self.axes.shape[0], self.axes.shape[1], 2))
else:
self.yrng = np.zeros((self.axes.shape[0], 2))
if not hasattr(self, 'yrng') and len(self.axes.shape) == 2:
self.yrng = np.zeros((self.axes.shape[0], self.axes.shape[1], 2))
elif not hasattr(self, 'yrng') and len(self.axes.shape) == 1:
self.yrng = np.zeros((self.axes.shape[0], 2))
if yrng[0] == yrng[1]:
yrng[1] = yrng[1] + 1
self.axes[subplot_index].set_ylim(yrng)
self.yrng[subplot_index, :] = yrng
def plot_from_spd_and_dir(
self, spd_field, dir_field, p_field, t_field, td_field, dsname=None, **kwargs
):
"""
This plot will make a sounding plot from wind data that is given
in speed and direction.
Parameters
----------
spd_field : str
The name of the field corresponding to the wind speed.
dir_field : str
The name of the field corresponding to the wind direction
in degrees from North.
p_field : str
The name of the field containing the atmospheric pressure.
t_field : str
The name of the field containing the atmospheric temperature.
td_field : str
The name of the field containing the dewpoint.
dsname : str or None
The name of the datastream to plot. Set to None to make ACT
attempt to automatically determine this.
kwargs : dict
Additional keyword arguments will be passed into
:func:`act.plotting.SkewTDisplay.plot_from_u_and_v`
Returns
-------
ax : matplotlib axis handle
The matplotlib axis handle corresponding to the plot.
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
# Make temporary field called tempu, tempv
spd = self._ds[dsname][spd_field].values * units(self._ds[dsname][spd_field].attrs['units'])
dir = self._ds[dsname][dir_field].values * units(self._ds[dsname][dir_field].attrs['units'])
tempu, tempv = mpcalc.wind_components(spd, dir)
self._ds[dsname]['temp_u'] = deepcopy(self._ds[dsname][spd_field])
self._ds[dsname]['temp_v'] = deepcopy(self._ds[dsname][spd_field])
self._ds[dsname]['temp_u'].values = tempu
self._ds[dsname]['temp_v'].values = tempv
the_ax = self.plot_from_u_and_v(
'temp_u', 'temp_v', p_field, t_field, td_field, dsname, **kwargs
)
del self._ds[dsname]['temp_u'], self._ds[dsname]['temp_v']
return the_ax
def plot_from_u_and_v(
self,
u_field,
v_field,
p_field,
t_field,
td_field,
dsname=None,
subplot_index=(0,),
p_levels_to_plot=None,
show_parcel=True,
shade_cape=True,
shade_cin=True,
set_title=None,
smooth_p=3,
plot_barbs_kwargs=dict(),
plot_kwargs=dict(),
):
"""
This function will plot a Skew-T from a sounding dataset. The wind
data must be given in u and v.
Parameters
----------
u_field : str
The name of the field containing the u component of the wind.
v_field : str
The name of the field containing the v component of the wind.
p_field : str
The name of the field containing the pressure.
t_field : str
The name of the field containing the temperature.
td_field : str
The name of the field containing the dewpoint temperature.
dsname : str or None
The name of the datastream to plot. Set to None to make ACT
attempt to automatically determine this.
subplot_index : tuple
The index of the subplot to make the plot on.
p_levels_to_plot : 1D array
The pressure levels to plot the wind barbs on. Set to None
            to have ACT use neatly spaced defaults of
25, 50, 75, 100, 150, 200, 250, 300, 400, 500, 600, 700, 750, 800,
850, 900, 950, and 1000 hPa.
show_parcel : bool
Set to True to show the temperature of a parcel lifted
from the surface.
shade_cape : bool
Set to True to shade the CAPE red.
shade_cin : bool
Set to True to shade the CIN blue.
set_title : None or str
The title of the plot is set to this. Set to None to use
a default title.
smooth_p : int
If pressure is not in descending order, will smooth the data
using this many points to try and work around the issue.
            Default is 3, but in the PBL retrieval code we have to default to 5 at times.
plot_barbs_kwargs : dict
Additional keyword arguments to pass into MetPy's
SkewT.plot_barbs.
plot_kwargs : dict
Additional keyword arguments to pass into MetPy's
SkewT.plot.
Returns
-------
ax : matplotlib axis handle
The axis handle to the plot.
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
if p_levels_to_plot is None:
p_levels_to_plot = np.array(
[
25.0,
50.0,
75.0,
100.0,
150.0,
200.0,
250.0,
300.0,
400.0,
500.0,
600.0,
700.0,
750.0,
800.0,
850.0,
900.0,
950.0,
1000.0,
]
) * units('hPa')
# Get pressure and smooth if not in order
p = self._ds[dsname][p_field]
if not all(p[i] <= p[i + 1] for i in range(len(p) - 1)):
self._ds[dsname][p_field] = (
self._ds[dsname][p_field].rolling(time=smooth_p, min_periods=1, center=True).mean()
)
p = self._ds[dsname][p_field]
p_units = self._ds[dsname][p_field].attrs['units']
p = p.values * getattr(units, p_units)
T = self._ds[dsname][t_field]
T_units = self._ds[dsname][t_field].attrs['units']
if T_units == 'C':
T_units = 'degC'
T = T.values * getattr(units, T_units)
Td = self._ds[dsname][td_field]
Td_units = self._ds[dsname][td_field].attrs['units']
if Td_units == 'C':
Td_units = 'degC'
Td = Td.values * getattr(units, Td_units)
u = self._ds[dsname][u_field]
u_units = self._ds[dsname][u_field].attrs['units']
u = u.values * getattr(units, u_units)
v = self._ds[dsname][v_field]
v_units = self._ds[dsname][v_field].attrs['units']
v = v.values * getattr(units, v_units)
u_red = np.zeros_like(p_levels_to_plot) * getattr(units, u_units)
v_red = np.zeros_like(p_levels_to_plot) * getattr(units, v_units)
# Check p_levels_to_plot units, and convert to p units if needed
if not hasattr(p_levels_to_plot, 'units'):
p_levels_to_plot = p_levels_to_plot * getattr(units, p_units)
else:
p_levels_to_plot = p_levels_to_plot.to(p_units)
for i in range(len(p_levels_to_plot)):
index = np.argmin(np.abs(p_levels_to_plot[i] - p))
u_red[i] = u[index].magnitude * getattr(units, u_units)
v_red[i] = v[index].magnitude * getattr(units, v_units)
self.SkewT[subplot_index].plot(p, T, 'r', **plot_kwargs)
self.SkewT[subplot_index].plot(p, Td, 'g', **plot_kwargs)
self.SkewT[subplot_index].plot_barbs(
p_levels_to_plot.magnitude, u_red, v_red, **plot_barbs_kwargs
)
# Metpy fix if Pressure does not decrease monotonically in
# your sounding.
try:
prof = mpcalc.parcel_profile(p, T[0], Td[0]).to('degC')
except metpy.calc.exceptions.InvalidSoundingError:
p = scipy.ndimage.median_filter(p, 3, output=float)
p = metpy.units.units.Quantity(p, p_units)
prof = mpcalc.parcel_profile(p, T[0], Td[0]).to('degC')
if show_parcel:
# Only plot where prof > T
lcl_pressure, lcl_temperature = mpcalc.lcl(p[0], T[0], Td[0])
self.SkewT[subplot_index].plot(
lcl_pressure, lcl_temperature, 'ko', markerfacecolor='black', **plot_kwargs
)
self.SkewT[subplot_index].plot(p, prof, 'k', linewidth=2, **plot_kwargs)
if shade_cape:
self.SkewT[subplot_index].shade_cape(p, T, prof, linewidth=2)
if shade_cin:
self.SkewT[subplot_index].shade_cin(p, T, prof, linewidth=2)
# Set Title
if set_title is None:
set_title = ' '.join(
[
dsname,
'on',
dt_utils.numpy_to_arm_date(self._ds[dsname].time.values[0]),
]
)
self.axes[subplot_index].set_title(set_title)
# Set Y Limit
our_data = p.magnitude
if np.isfinite(our_data).any():
yrng = [np.nanmax(our_data), np.nanmin(our_data)]
else:
yrng = [1000.0, 100.0]
self.set_yrng(yrng, subplot_index)
# Set X Limit
xrng = [np.nanmin(T.magnitude) - 10.0, np.nanmax(T.magnitude) + 10.0]
self.set_xrng(xrng, subplot_index)
return self.axes[subplot_index]
def plot_hodograph(
self, spd_field, dir_field, color_field=None, set_fig=None, set_axes=None,
component_range=80, dsname=None, uv_flag=False
):
"""
This will plot a hodograph from the radiosonde wind data using
MetPy
Parameters
----------
spd_field : str
The name of the field corresponding to the wind speed.
dir_field : str
The name of the field corresponding to the wind direction
in degrees from North.
color_field : str, optional
The name of the field if wanting to shade by another variable
set_fig : matplotlib figure, optional
The figure to plot on
set_axes : matplotlib axes, optional
The specific axes to plot on
component_range : int
Range of the hodograph. Default is 80
dsname : str
Name of the datastream to plot if multiple in the plot object
uv_flag : boolean
If set to True, spd_field and dir_field will be treated as the
U and V wind variable names
Returns
-------
self.axes : matplotlib axes
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
# Get the current plotting axis
if set_fig is not None:
self.fig = set_fig
if set_axes is not None:
self.axes = set_axes
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
# Calculate u/v wind components from speed/direction
if uv_flag is False:
spd = self._ds[dsname][spd_field].values * units(self._ds[dsname][spd_field].attrs['units'])
dir = self._ds[dsname][dir_field].values * units(self._ds[dsname][dir_field].attrs['units'])
u, v = mpcalc.wind_components(spd, dir)
else:
u = self._ds[dsname][spd_field].values * units(self._ds[dsname][spd_field].attrs['units'])
v = self._ds[dsname][dir_field].values * units(self._ds[dsname][dir_field].attrs['units'])
# Plot out the data using the Hodograph method
h = Hodograph(self.axes, component_range=component_range)
h.add_grid(increment=20)
if color_field is None:
h.plot(u, v)
else:
data = self._ds[dsname][color_field].values *\
units(self._ds[dsname][color_field].attrs['units'])
h.plot_colormapped(u, v, data)
return self.axes
def add_stability_info(
self, temp_name='tdry', td_name='dp', p_name='pres', rh_name='rh',
overwrite_data=None, add_data=None, set_fig=None, set_axes=None, dsname=None
):
"""
        This will calculate stability indices from the sounding data (or use
        user-supplied values) and display them as a text table on the plot.
Parameters
----------
temp_name : str
The name of the temperature field.
td_name : str
The name of the dewpoint field.
p_name : str
The name of the pressure field.
rh_name : str
The name of the relative humidity field.
overwrite_data : dict
            A dictionary of variables/values to write out instead
of the ones calculated by MetPy. Needs to be of the form
.. code-block:: python
overwrite_data={'LCL': 234, 'CAPE': 25}
...
add_data : dict
A dictionary of variables and values to write out in
addition to the MetPy calculated ones
set_fig : matplotlib figure, optional
The figure to plot on
set_axes : matplotlib axes, optional
The specific axes to plot on
dsname : str
Name of the datastream to plot if multiple in the plot object
Returns
-------
self.axes : matplotlib axes
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
# Get the current plotting axis
if set_fig is not None:
self.fig = set_fig
if set_axes is not None:
self.axes = set_axes
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
self.axes.spines['top'].set_visible(False)
self.axes.spines['right'].set_visible(False)
self.axes.spines['bottom'].set_visible(False)
self.axes.spines['left'].set_visible(False)
self.axes.get_xaxis().set_ticks([])
self.axes.get_yaxis().set_ticks([])
ct = 0
if overwrite_data is None:
# Calculate stability indicies
ds_sonde = calculate_stability_indicies(
self._ds[dsname], temp_name=temp_name, td_name=td_name, p_name=p_name, rh_name=rh_name,
)
# Add MetPy calculated variables to the list
variables = {
'lifted_index': 'Lifted Index',
'surface_based_cape': 'SBCAPE',
'surface_based_cin': 'SBCIN',
'most_unstable_cape': 'MUCAPE',
'most_unstable_cin': 'MUCIN',
'lifted_condensation_level_temperature': 'LCL Temp',
'lifted_condensation_level_pressure': 'LCL Pres',
}
for i, v in enumerate(variables):
var_string = str(np.round(ds_sonde[v].values, 2))
self.axes.text(
-0.05, (0.98 - (0.1 * i)),
variables[v] + ': ', transform=self.axes.transAxes,
fontsize=10, verticalalignment='top'
)
self.axes.text(
0.95, (0.98 - (0.1 * i)),
var_string, transform=self.axes.transAxes,
fontsize=10, verticalalignment='top', horizontalalignment='right'
)
ct += 1
else:
# If overwrite_data is set, the user passes in their own dictionary
for i, v in enumerate(overwrite_data):
var_string = str(np.round(overwrite_data[v], 2))
self.axes.text(
-0.05, (0.98 - (0.1 * i)),
v + ': ', transform=self.axes.transAxes,
fontsize=10, verticalalignment='top'
)
self.axes.text(
0.95, (0.98 - (0.1 * i)),
var_string, transform=self.axes.transAxes,
fontsize=10, verticalalignment='top', horizontalalignment='right'
)
# User can also add variables to the existing ones calculated by MetPy
if add_data is not None:
for i, v in enumerate(add_data):
var_string = str(np.round(add_data[v], 2))
self.axes.text(
-0.05, (0.98 - (0.1 * (i + ct))),
v + ': ', transform=self.axes.transAxes,
fontsize=10, verticalalignment='top',
)
self.axes.text(
0.95, (0.98 - (0.1 * (i + ct))),
var_string, transform=self.axes.transAxes,
fontsize=10, verticalalignment='top', horizontalalignment='right'
)
return self.axes
def plot_enhanced_skewt(
self, spd_name='wspd', dir_name='deg', temp_name='tdry', td_name='dp', p_name='pres', rh_name='rh',
overwrite_data=None, add_data=None, color_field=None, component_range=80, uv_flag=False, dsname=None,
figsize=(14, 10)
):
"""
This will plot an enhanced Skew-T plot with a Hodograph on the top right
and the stability parameters on the lower right. This will create a new
figure so that one does not need to be defined through subplot_shape.
Requires Matplotlib v 3.7 and higher
Parameters
----------
spd_name : str
The name of the field corresponding to the wind speed.
dir_name : str
The name of the field corresponding to the wind direction
in degrees from North.
temp_name : str
The name of the temperature field.
td_name : str
The name of the dewpoint field.
p_name : str
The name of the pressure field.
rh_name : str
The name of the relative humidity field.
overwrite_data : dict
            A dictionary of variables/values to write out instead
of the ones calculated by MetPy. Needs to be of the form
.. code-block:: python
overwrite_data={'LCL': 234, 'CAPE': 25}
...
add_data : dict
A dictionary of variables and values to write out in
addition to the MetPy calculated ones
color_field : str, optional
The name of the field if wanting to shade by another variable
component_range : int
Range of the hodograph. Default is 80
uv_flag : boolean
If set to True, spd_field and dir_field will be treated as the
U and V wind variable names
dsname : str
Name of the datastream to plot if multiple in the plot object
figsize : tuple
Figure size for the plot
Returns
-------
self.axes : matplotlib axes
"""
# Set up the figure and axes
# Close existing figure as a new one will be created
plt.close('all')
subplot_kw = {"a": {"projection": "skewx"}}
fig, axs = plt.subplot_mosaic(
[['a', 'a', 'b'], ['a', 'a', 'b'], ['a', 'a', 'c'], ['a', 'a', 'c']],
layout='constrained', per_subplot_kw=subplot_kw
)
self.fig = fig
self.axes = axs
# Plot out the Skew-T
display = SkewTDisplay(self._ds, set_fig=fig, subplot=axs['a'], figsize=figsize)
if uv_flag is True:
display.plot_from_u_and_v(spd_name, dir_name, p_name, temp_name, td_name)
else:
display.plot_from_spd_and_dir(spd_name, dir_name, p_name, temp_name, td_name)
# Plot the hodograph
display.plot_hodograph(spd_name, dir_name, set_axes=axs['b'], color_field=color_field,
component_range=component_range, dsname=dsname, uv_flag=uv_flag)
# Add Stability information
display.add_stability_info(set_axes=axs['c'], temp_name=temp_name, td_name=td_name,
p_name=p_name, rh_name=rh_name, overwrite_data=overwrite_data,
add_data=add_data, dsname=dsname)
return self.axes
<|code_end|>
examples/plotting/plot_enhanced_skewt.py
<|code_start|>"""
Enhanced plot of a sounding
---------------------------
This example shows how to make an enhanced plot for sounding data
which includes a Skew-T plot, hodograph, and stability indices.
Author: Adam Theisen
"""
import glob
import metpy
import numpy as np
import xarray as xr
from matplotlib import pyplot as plt
import act
# Read data
file = sorted(glob.glob(act.tests.sample_files.EXAMPLE_SONDE1))
ds = act.io.armfiles.read_netcdf(file)
# Plot enhanced Skew-T plot
display = act.plotting.SkewTDisplay(ds)
display.plot_enhanced_skewt(color_field='alt')
ds.close()
plt.show()
<|code_end|>
|
Setting y-range for groupby causes error
* ACT version: 1.4.2
* Python version: 3.10.9
* Operating System: Mac Os
### Description
Trying to set the y-range for a groupby plot creates an error
### What I Did
```
files = glob.glob('./Data/sgpmetE13.b1/*2021*')
files.sort()
obj = act.io.armfiles.read_netcdf(files)
display = act.plotting.TimeSeriesDisplay(obj, subplot_shape=(4, 3))
groupby = display.group_by('month')
groupby.plot_group('plot', None, field='temp_mean', marker=' ')
groupby.display.set_yrng([-20, 50])
plt.show()
```
Error
```
File "/Users/atheisen/Code/development-space/test_plotting.py", line 26, in <module>
groupby.display.set_yrng([-20, 50])
File "/Users/atheisen/Code/ACT/act/plotting/timeseriesdisplay.py", line 267, in set_yrng
self.axes[subplot_index].set_ylim(yrng)
AttributeError: 'numpy.ndarray' object has no attribute 'set_ylim'
```
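A possible workaround sketch until this is addressed (untested; it either sets the limits directly on each matplotlib axes or passes an explicit 2-D `subplot_index` per panel):
```
# Option 1: set the limits directly on each axes in the (4, 3) grid
for ax in groupby.display.axes.flatten():
    ax.set_ylim([-20, 50])

# Option 2: call set_yrng once per panel with a full 2-D subplot index
for i in range(4):
    for j in range(3):
        groupby.display.set_yrng([-20, 50], subplot_index=(i, j))
```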
|
act/plotting/timeseriesdisplay.py
<|code_start|>"""
Stores the class for TimeSeriesDisplay.
"""
import datetime as dt
import textwrap
import warnings
from copy import deepcopy
from re import search, search as re_search
import matplotlib as mpl
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from matplotlib import colors as mplcolors
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scipy.interpolate import NearestNDInterpolator
from ..qc.qcfilter import parse_bit
from ..utils import data_utils, datetime_utils as dt_utils
from ..utils.datetime_utils import determine_time_delta, reduce_time_ranges
from ..utils.geo_utils import get_sunrise_sunset_noon
from . import common
from .plot import Display
class TimeSeriesDisplay(Display):
"""
This subclass contains routines that are specific to plotting
time series plots from data. It is inherited from Display and therefore
contains all of Display's attributes and methods.
Examples
--------
To create a TimeSeriesDisplay with 3 rows, simply do:
.. code-block:: python
ds = act.read_netcdf(the_file)
disp = act.plotting.TimeSeriesDisplay(ds, subplot_shape=(3,), figsize=(15, 5))
The TimeSeriesDisplay constructor takes in the same keyword arguments as
plt.subplots. For more information on the plt.subplots keyword arguments,
see the `matplotlib documentation
<https://matplotlib.org/api/_as_gen/matplotlib.pyplot.subplots.html>`_.
If no subplot_shape is provided, then no figure or axis will be created
until add_subplots or plots is called.
"""
def __init__(self, ds, subplot_shape=(1,), ds_name=None, **kwargs):
super().__init__(ds, subplot_shape, ds_name, **kwargs)
def day_night_background(self, dsname=None, subplot_index=(0,)):
"""
Colorcodes the background according to sunrise/sunset.
Parameters
----------
dsname : None or str
If there is more than one datastream in the display object the
name of the datastream needs to be specified. If set to None and
there is only one datastream then ACT will use the sole datastream
in the object.
subplot_index : 1 or 2D tuple, list, or array
The index to the subplot to place the day and night background in.
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream to derive the '
+ 'information needed for the day and night '
+ 'background when 2 or more datasets are in '
+ 'the display object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
# Get File Dates
try:
file_dates = self._ds[dsname].attrs['_file_dates']
except KeyError:
file_dates = []
if len(file_dates) == 0:
sdate = dt_utils.numpy_to_arm_date(self._ds[dsname].time.values[0])
edate = dt_utils.numpy_to_arm_date(self._ds[dsname].time.values[-1])
file_dates = [sdate, edate]
all_dates = dt_utils.dates_between(file_dates[0], file_dates[-1])
if self.axes is None:
raise RuntimeError('day_night_background requires the plot to ' 'be displayed.')
ax = self.axes[subplot_index]
# Find variable names for latitude and longitude
variables = list(self._ds[dsname].data_vars)
lat_name = [var for var in ['lat', 'latitude'] if var in variables]
lon_name = [var for var in ['lon', 'longitude'] if var in variables]
if len(lat_name) == 0:
lat_name = None
else:
lat_name = lat_name[0]
if len(lon_name) == 0:
lon_name = None
else:
lon_name = lon_name[0]
# Variable name does not match, look for standard_name declaration
if lat_name is None or lon_name is None:
for var in variables:
try:
if self._ds[dsname][var].attrs['standard_name'] == 'latitude':
lat_name = var
except KeyError:
pass
try:
if self._ds[dsname][var].attrs['standard_name'] == 'longitude':
lon_name = var
except KeyError:
pass
if lat_name is not None and lon_name is not None:
break
if lat_name is None or lon_name is None:
return
# Extract latitude and longitude scalar from variable. If variable is a vector look
# for first non-Nan value.
lat_lon_list = [np.nan, np.nan]
for ii, var_name in enumerate([lat_name, lon_name]):
try:
values = self._ds[dsname][var_name].values
if values.size == 1:
lat_lon_list[ii] = float(values)
else:
                    # Look for non-NaN values to use for latitude location. If not found use first value.
index = np.where(np.isfinite(values))[0]
if index.size == 0:
lat_lon_list[ii] = float(values[0])
else:
lat_lon_list[ii] = float(values[index[0]])
except AttributeError:
pass
for value, name in zip(lat_lon_list, ['Latitude', 'Longitude']):
if not np.isfinite(value):
warnings.warn(f"{name} value in dataset equal to '{value}' is not finite. ", RuntimeWarning)
return
lat = lat_lon_list[0]
lon = lat_lon_list[1]
lat_range = [-90, 90]
if not (lat_range[0] <= lat <= lat_range[1]):
warnings.warn(
f"Latitude value in dataset of '{lat}' not within acceptable "
f'range of {lat_range[0]} <= latitude <= {lat_range[1]}. ',
RuntimeWarning,
)
return
lon_range = [-180, 180]
if not (lon_range[0] <= lon <= lon_range[1]):
warnings.warn(
f"Longitude value in dataset of '{lon}' not within acceptable "
f'range of {lon_range[0]} <= longitude <= {lon_range[1]}. ',
RuntimeWarning,
)
return
# Initialize the plot to a gray background for total darkness
rect = ax.patch
rect.set_facecolor('0.85')
# Get date ranges to plot
plot_dates = []
for f in all_dates:
for ii in [-1, 0, 1]:
plot_dates.append(f + dt.timedelta(days=ii))
# Get sunrise, sunset and noon times
sunrise, sunset, noon = get_sunrise_sunset_noon(lat, lon, plot_dates)
# Plot daylight
for ii in range(0, len(sunrise)):
ax.axvspan(sunrise[ii], sunset[ii], facecolor='#FFFFCC', zorder=0)
# Plot noon line
for ii in noon:
ax.axvline(x=ii, linestyle='--', color='y', zorder=1)
def set_xrng(self, xrng, subplot_index=(0,)):
"""
Sets the x range of the plot.
Parameters
----------
xrng : 2 number array
The x limits of the plot.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
"""
if self.axes is None:
raise RuntimeError('set_xrng requires the plot to be displayed.')
# If the xlim is set to the same value for range it will throw a warning
# This is to catch that and expand the range so we avoid the warning.
if xrng[0] == xrng[1]:
if isinstance(xrng[0], np.datetime64):
print(f'\nAttempting to set xlim range to single value {xrng[0]}. '
'Expanding range by 2 seconds.\n')
xrng[0] -= np.timedelta64(1, 's')
xrng[1] += np.timedelta64(1, 's')
elif isinstance(xrng[0], dt.datetime):
print(f'\nAttempting to set xlim range to single value {xrng[0]}. '
'Expanding range by 2 seconds.\n')
xrng[0] -= dt.timedelta(seconds=1)
xrng[1] += dt.timedelta(seconds=1)
self.axes[subplot_index].set_xlim(xrng)
# Make sure that the xrng value is a numpy array not pandas
if isinstance(xrng[0], pd.Timestamp):
xrng = [x.to_numpy() for x in xrng if isinstance(x, pd.Timestamp)]
# Make sure that the xrng value is a numpy array not datetime.datetime
if isinstance(xrng[0], dt.datetime):
xrng = [np.datetime64(x) for x in xrng if isinstance(x, dt.datetime)]
if len(subplot_index) < 2:
self.xrng[subplot_index, 0] = xrng[0].astype('datetime64[D]').astype(float)
self.xrng[subplot_index, 1] = xrng[1].astype('datetime64[D]').astype(float)
else:
self.xrng[subplot_index][0] = xrng[0].astype('datetime64[D]').astype(float)
self.xrng[subplot_index][1] = xrng[1].astype('datetime64[D]').astype(float)
def set_yrng(self, yrng, subplot_index=(0,)):
"""
Sets the y range of the plot.
Parameters
----------
yrng : 2 number array
The y limits of the plot.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
"""
if self.axes is None:
raise RuntimeError('set_yrng requires the plot to be displayed.')
if not hasattr(self, 'yrng') and len(self.axes.shape) == 2:
self.yrng = np.zeros((self.axes.shape[0], self.axes.shape[1], 2))
elif not hasattr(self, 'yrng') and len(self.axes.shape) == 1:
self.yrng = np.zeros((self.axes.shape[0], 2))
if yrng[0] == yrng[1]:
yrng[1] = yrng[1] + 1
self.axes[subplot_index].set_ylim(yrng)
try:
self.yrng[subplot_index, :] = yrng
except IndexError:
self.yrng[subplot_index] = yrng
def plot(
self,
field,
dsname=None,
subplot_index=(0,),
cmap=None,
set_title=None,
add_nan=False,
day_night_background=False,
invert_y_axis=False,
abs_limits=(None, None),
time_rng=None,
y_rng=None,
use_var_for_y=None,
set_shading='auto',
assessment_overplot=False,
overplot_marker='.',
overplot_behind=False,
overplot_markersize=6,
assessment_overplot_category={
'Incorrect': ['Bad', 'Incorrect'],
'Suspect': ['Indeterminate', 'Suspect'],
},
assessment_overplot_category_color={'Incorrect': 'red', 'Suspect': 'orange'},
force_line_plot=False,
labels=False,
cbar_label=None,
cbar_h_adjust=None,
secondary_y=False,
y_axis_flag_meanings=False,
colorbar_labels=None,
cb_friendly=False,
match_line_label_color=False,
**kwargs,
):
"""
Makes a timeseries plot. If subplots have not been added yet, an axis
will be created assuming that there is only going to be one plot.
If plotting a high data volume 2D dataset, it may take some time to plot.
In order to speed up your plot creation, please resample your data to a
lower resolution dataset.
Parameters
----------
field : str
The name of the field to plot.
dsname : None or str
If there is more than one datastream in the display object the
name of the datastream needs to be specified. If set to None and
there is only one datastream ACT will use the sole datastream
in the object.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
cmap : matplotlib colormap
The colormap to use.
set_title : str
The title for the plot.
add_nan : bool
Set to True to fill in data gaps with NaNs.
day_night_background : bool
            Set to True to fill in a color coded background
            according to the time of day.
abs_limits : tuple or list
Sets the bounds on plot limits even if data values exceed
those limits. Set to (ymin,ymax). Use None if only setting
minimum or maximum limit, i.e. (22., None).
time_rng : tuple or list
List or tuple with (min, max) values to set the x-axis range
limits.
y_rng : tuple or list
List or tuple with (min, max) values to set the y-axis range
use_var_for_y : str
Set this to the name of a data variable in the Dataset to use as
the y-axis variable instead of the default dimension. Useful for
instances where data has an index-based dimension instead of a
height-based dimension. If shapes of arrays do not match it will
automatically revert back to the original ydata.
set_shading : string
            Option to set the matplotlib.pcolormesh shading parameter.
Default to 'auto'
assessment_overplot : boolean
Option to overplot quality control colored symbols over plotted
data using flag_assessment categories.
overplot_marker : str
Marker to use for overplot symbol.
overplot_behind : bool
Place the overplot marker behind the data point.
overplot_markersize : float or int
Size of overplot marker. If overplot_behind or force_line_plot
are set the marker size will be double overplot_markersize so
the color is visible.
assessment_overplot_category : dict
Lookup to categorize assessments into groups. This allows using
multiple terms for the same quality control level of failure.
Also allows adding more to the defaults.
assessment_overplot_category_color : dict
Lookup to match overplot category color to assessment grouping.
force_line_plot : boolean
Option to plot 2D data as 1D line plots.
labels : boolean or list
Option to overwrite the legend labels. Must have same dimensions as
number of lines plotted.
cbar_label : str
Option to overwrite default colorbar label.
cbar_h_adjust : float
Option to adjust location of colorbar horizontally. Positive values
move to right negative values move to left.
secondary_y : boolean
Option to plot on secondary y axis.
y_axis_flag_meanings : boolean or int
When set to True and plotting state variable with flag_values and
flag_meanings attributes will replace y axis numerical values
with flag_meanings value. Set to a positive number larger than 1
            to indicate the maximum word length to use. If text is longer than the
            value and has space characters, it will be split over multiple lines.
colorbar_labels : dict
A dictionary containing values for plotting a 2D array of state variables.
The dictionary uses data values as keys and a dictionary containing keys
'text' and 'color' for each data value to plot.
Example:
{0: {'text': 'Clear sky', 'color': 'white'},
1: {'text': 'Liquid', 'color': 'green'},
2: {'text': 'Ice', 'color': 'blue'},
3: {'text': 'Mixed phase', 'color': 'purple'}}
cb_friendly : boolean
Set to true if you want to use the integrated colorblind friendly
colors for green/red based on the Homeyer colormap.
match_line_label_color : boolean
Will set the y label to match the line color in the plot. This
will only work if the time series plot is a line plot.
**kwargs : keyword arguments
The keyword arguments for :func:`plt.plot` (1D timeseries) or
:func:`plt.pcolormesh` (2D timeseries).
Returns
-------
ax : matplotlib axis handle
The matplotlib axis handle of the plot.
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
if y_axis_flag_meanings:
kwargs['linestyle'] = ''
if cb_friendly:
cmap = 'act_HomeyerRainbow'
assessment_overplot_category_color['Bad'] = (0.9285714285714286, 0.7130901016453677, 0.7130901016453677)
assessment_overplot_category_color['Incorrect'] = (0.9285714285714286, 0.7130901016453677, 0.7130901016453677)
            assessment_overplot_category_color['Not Failing'] = (0.0, 0.4240129715562796, 0.4240129715562796)
            assessment_overplot_category_color['Acceptable'] = (0.0, 0.4240129715562796, 0.4240129715562796)
# Get data and dimensions
data = self._ds[dsname][field]
dim = list(self._ds[dsname][field].dims)
xdata = self._ds[dsname][dim[0]]
if 'units' in data.attrs:
ytitle = ''.join(['(', data.attrs['units'], ')'])
else:
ytitle = field
if cbar_label is None:
cbar_default = ytitle
if len(dim) > 1:
if use_var_for_y is None:
ydata = self._ds[dsname][dim[1]]
else:
ydata = self._ds[dsname][use_var_for_y]
ydata_dim1 = self._ds[dsname][dim[1]]
if np.shape(ydata) != np.shape(ydata_dim1):
ydata = ydata_dim1
units = ytitle
if 'units' in ydata.attrs.keys():
units = ydata.attrs['units']
ytitle = ''.join(['(', units, ')'])
else:
units = ''
ytitle = dim[1]
            # Create labels if plotting 2D data as 1D line plots
if force_line_plot is True:
if labels is True:
labels = [' '.join([str(d), units]) for d in ydata.values]
ytitle = f"({data.attrs['units']})"
ydata = None
else:
ydata = None
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
# Set up secondary y axis if requested
if secondary_y is False:
ax = self.axes[subplot_index]
else:
ax = self.axes[subplot_index].twinx()
if colorbar_labels is not None:
flag_values = list(colorbar_labels.keys())
flag_meanings = [value['text'] for key, value in colorbar_labels.items()]
cbar_colors = [value['color'] for key, value in colorbar_labels.items()]
cmap = mpl.colors.ListedColormap(cbar_colors)
for ii, flag_meaning in enumerate(flag_meanings):
if len(flag_meaning) > 20:
flag_meaning = textwrap.fill(flag_meaning, width=20)
flag_meanings[ii] = flag_meaning
else:
flag_values = None
flag_meanings = None
cbar_colors = None
if ydata is None:
            # Add in nans to ensure the plot does not connect lines across data gaps.
if add_nan is True:
xdata, data = data_utils.add_in_nan(xdata, data)
if day_night_background is True:
self.day_night_background(subplot_index=subplot_index, dsname=dsname)
# If limiting data being plotted use masked arrays
# Need to do it this way because of autoscale() method
if abs_limits[0] is not None and abs_limits[1] is not None:
data = np.ma.masked_outside(data, abs_limits[0], abs_limits[1])
elif abs_limits[0] is not None and abs_limits[1] is None:
data = np.ma.masked_less_equal(data, abs_limits[0])
elif abs_limits[0] is None and abs_limits[1] is not None:
data = np.ma.masked_greater_equal(data, abs_limits[1])
# Plot the data
if 'marker' not in kwargs.keys():
kwargs['marker'] = '.'
lines = ax.plot(xdata, data, **kwargs)
# Check if we need to call legend method after plotting. This is only
# called when no assessment overplot is called.
add_legend = False
if 'label' in kwargs.keys():
add_legend = True
# Overplot failing data if requested
if assessment_overplot:
                # If we are doing a forced line plot from 2D data we need to manage
                # legend labels. Make arrays to hold the labels of QC-failing data
                # because they are not set when the labels keyword is not set.
if not isinstance(labels, list) and add_legend is False:
labels = []
lines = []
# For forced line plot need to plot QC behind point instead of
# on top of point.
zorder = None
if force_line_plot or overplot_behind:
zorder = 0
overplot_markersize *= 2.0
for assessment, categories in assessment_overplot_category.items():
flag_data = self._ds[dsname].qcfilter.get_masked_data(
field, rm_assessments=categories, return_inverse=True
)
if np.invert(flag_data.mask).any() and np.isfinite(flag_data).any():
try:
flag_data.mask = np.logical_or(data.mask, flag_data.mask)
except AttributeError:
pass
qc_ax = ax.plot(
xdata,
flag_data,
marker=overplot_marker,
linestyle='',
markersize=overplot_markersize,
color=assessment_overplot_category_color[assessment],
label=assessment,
zorder=zorder,
)
# If labels keyword is set need to add labels for calling legend
if isinstance(labels, list):
# If plotting forced_line_plot need to subset the Line2D object
# so we don't have more than one added to legend.
if len(qc_ax) > 1:
lines.extend(qc_ax[:1])
else:
lines.extend(qc_ax)
labels.append(assessment)
add_legend = True
# Add legend if labels are available
if isinstance(labels, list):
ax.legend(lines, labels)
elif add_legend:
ax.legend()
# Change y axis to text from flag_meanings if requested.
if y_axis_flag_meanings:
flag_meanings = self._ds[dsname][field].attrs['flag_meanings']
flag_values = self._ds[dsname][field].attrs['flag_values']
# If keyword is larger than 1 assume this is the maximum character length
# desired and insert returns to wrap text.
if y_axis_flag_meanings > 1:
for ii, flag_meaning in enumerate(flag_meanings):
if len(flag_meaning) > y_axis_flag_meanings:
flag_meaning = textwrap.fill(flag_meaning, width=y_axis_flag_meanings)
flag_meanings[ii] = flag_meaning
ax.set_yticks(flag_values)
ax.set_yticklabels(flag_meanings)
else:
# Add in nans to ensure the data are not streaking
if add_nan is True:
xdata, data = data_utils.add_in_nan(xdata, data)
            # Sets shading parameter to auto. Matplotlib will check dimensions.
            # If X, Y, and C have the same dimensions, shading is set to nearest.
            # If X and Y dimensions are 1 greater than C, shading is set to flat.
if 'edgecolors' not in kwargs.keys():
kwargs['edgecolors'] = 'face'
mesh = ax.pcolormesh(
np.asarray(xdata),
ydata,
data.transpose(),
shading=set_shading,
cmap=cmap,
**kwargs,
)
# Set Title
if set_title is None:
if isinstance(self._ds[dsname].time.values[0], np.datetime64):
set_title = ' '.join(
[
dsname,
field,
'on',
dt_utils.numpy_to_arm_date(self._ds[dsname].time.values[0]),
]
)
else:
date_result = search(
r'\d{4}-\d{1,2}-\d{1,2}', self._ds[dsname].time.attrs['units']
)
if date_result is not None:
set_title = ' '.join([dsname, field, 'on', date_result.group(0)])
else:
set_title = ' '.join([dsname, field])
if secondary_y is False:
ax.set_title(set_title)
# Set YTitle
if not y_axis_flag_meanings:
if match_line_label_color and len(ax.get_lines()) > 0:
ax.set_ylabel(ytitle, color=ax.get_lines()[0].get_color())
else:
ax.set_ylabel(ytitle)
# Set X Limit - We want the same time axes for all subplots
if not hasattr(self, 'time_rng'):
if time_rng is not None:
self.time_rng = list(time_rng)
else:
self.time_rng = [xdata.min().values, xdata.max().values]
self.set_xrng(self.time_rng, subplot_index)
# Set Y Limit
if y_rng is not None:
self.set_yrng(y_rng)
if hasattr(self, 'yrng'):
# Make sure that the yrng is not just the default
if ydata is None:
if abs_limits[0] is not None or abs_limits[1] is not None:
our_data = data
else:
our_data = data.values
else:
our_data = ydata
finite = np.isfinite(our_data)
if finite.any():
our_data = our_data[finite]
if invert_y_axis is False:
yrng = [np.min(our_data), np.max(our_data)]
else:
yrng = [np.max(our_data), np.min(our_data)]
else:
yrng = [0, 1]
# Check if current range is outside of new range and only set
# values that work for all data plotted.
if isinstance(yrng[0], np.datetime64):
yrng = mdates.datestr2num([str(yrng[0]), str(yrng[1])])
current_yrng = ax.get_ylim()
if invert_y_axis is False:
if yrng[0] > current_yrng[0]:
yrng[0] = current_yrng[0]
if yrng[1] < current_yrng[1]:
yrng[1] = current_yrng[1]
else:
if yrng[0] < current_yrng[0]:
yrng[0] = current_yrng[0]
if yrng[1] > current_yrng[1]:
yrng[1] = current_yrng[1]
# Set y range the normal way if not secondary y
# If secondary, just use set_ylim
if secondary_y is False:
self.set_yrng(yrng, subplot_index)
else:
ax.set_ylim(yrng)
# Set X Format
if len(subplot_index) == 1:
days = self.xrng[subplot_index, 1] - self.xrng[subplot_index, 0]
else:
days = self.xrng[subplot_index][1] - self.xrng[subplot_index][0]
myFmt = common.get_date_format(days)
ax.xaxis.set_major_formatter(myFmt)
# Set X format - We want the same time axes for all subplots
if not hasattr(self, 'time_fmt'):
self.time_fmt = myFmt
# Put on an xlabel, but only if we are making the bottom-most plot
if subplot_index[0] == self.axes.shape[0] - 1:
ax.set_xlabel('Time [UTC]')
if ydata is not None:
if cbar_label is None:
cbar_title = cbar_default
else:
cbar_title = ''.join(['(', cbar_label, ')'])
if colorbar_labels is not None:
cbar_title = None
cbar = self.add_colorbar(
mesh,
title=cbar_title,
subplot_index=subplot_index,
values=flag_values,
pad=cbar_h_adjust,
)
cbar.set_ticks(flag_values)
cbar.set_ticklabels(flag_meanings)
cbar.ax.tick_params(labelsize=10)
else:
self.add_colorbar(
mesh, title=cbar_title, subplot_index=subplot_index, pad=cbar_h_adjust
)
return ax
def plot_barbs_from_spd_dir(
self, speed_field, direction_field, pres_field=None, dsname=None, **kwargs
):
"""
This procedure will make a wind barb plot timeseries.
If a pressure field is given and the wind fields are 1D, which, for
example, would occur if one wants to plot a timeseries of
rawinsonde data, then a time-height cross section of
winds will be made.
Note: This procedure calls plot_barbs_from_u_v and will take in the
same keyword arguments as that procedure.
Parameters
----------
speed_field : str
The name of the field specifying the wind speed in m/s.
direction_field : str
The name of the field specifying the wind direction in degrees.
0 degrees is defined to be north and increases clockwise like
what is used in standard meteorological notation.
pres_field : str
The name of the field specifying pressure or height. If using
height coordinates, then we recommend setting invert_y_axis
to False.
dsname : str
The name of the datastream to plot. Setting to None will make
ACT attempt to autodetect this.
kwargs : dict
Any additional keyword arguments will be passed into
:func:`act.plotting.TimeSeriesDisplay.plot_barbs_from_u_v`.
Returns
-------
the_ax : matplotlib axis handle
The handle to the axis where the plot was made on.
Examples
--------
.. code-block:: python
sonde_ds = act.io.armfiles.read_netcdf(
act.tests.sample_files.EXAMPLE_TWP_SONDE_WILDCARD)
BarbDisplay = act.plotting.TimeSeriesDisplay(
{'sonde_darwin': sonde_ds}, figsize=(10,5))
BarbDisplay.plot_barbs_from_spd_dir('wspd', 'deg', 'pres',
num_barbs_x=20)
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
# Make temporary field called tempu, tempv
spd = self._ds[dsname][speed_field]
dir = self._ds[dsname][direction_field]
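# Standard meteorological convention: the direction is where the wind blows
# FROM, measured clockwise from north, so the components below are
# u = -speed * sin(direction) and v = -speed * cos(direction).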
tempu = -np.sin(np.deg2rad(dir)) * spd
tempv = -np.cos(np.deg2rad(dir)) * spd
self._ds[dsname]['temp_u'] = deepcopy(self._ds[dsname][speed_field])
self._ds[dsname]['temp_v'] = deepcopy(self._ds[dsname][speed_field])
self._ds[dsname]['temp_u'].values = tempu
self._ds[dsname]['temp_v'].values = tempv
the_ax = self.plot_barbs_from_u_v('temp_u', 'temp_v', pres_field, dsname, **kwargs)
del self._ds[dsname]['temp_u'], self._ds[dsname]['temp_v']
return the_ax
def plot_barbs_from_u_v(
self,
u_field,
v_field,
pres_field=None,
dsname=None,
subplot_index=(0,),
set_title=None,
day_night_background=False,
invert_y_axis=True,
num_barbs_x=20,
num_barbs_y=20,
use_var_for_y=None,
**kwargs,
):
"""
This function will plot a wind barb timeseries from u and v wind
data. If pres_field is given, a time-height series will be plotted
from 1-D wind data.
Parameters
----------
u_field : str
The name of the field containing the U component of the wind.
v_field : str
The name of the field containing the V component of the wind.
pres_field : str or None
The name of the field containing the pressure or height. Set
to None to not use this.
dsname : str or None
The name of the datastream to plot. Setting to None will make
ACT automatically try to determine this.
subplot_index : 2-tuple
The index of the subplot to make the plot on.
set_title : str or None
The title of the plot.
day_night_background : bool
Set to True to plot a day/night background.
invert_y_axis : bool
Set to True to invert the y axis (i.e. for plotting pressure as
the height coordinate).
num_barbs_x : int
The number of wind barbs to plot in the x axis.
num_barbs_y : int
The number of wind barbs to plot in the y axis.
cmap : matplotlib.colors.LinearSegmentedColormap
A color map to use with wind barbs. If this is set, the wind speed
(the square root of the sum of the squared u and v components) is passed
to plt.barbs as the C parameter and colored with this color map. A
colorbar will also be added. The colorbar limits can be set with 'clim'.
Setting this changes the wind barbs from black to colors.
use_var_for_y : str
Set this to the name of a data variable in the Dataset to use as the
y-axis variable instead of the default dimension. Useful for instances
where data has an index-based dimension instead of a height-based
dimension. If shapes of arrays do not match it will automatically
revert back to the original ydata.
**kwargs : keyword arguments
Additional keyword arguments will be passed into plt.barbs.
Returns
-------
ax : matplotlib axis handle
The axis handle that contains the reference to the
constructed plot.
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
# Get data and dimensions
u = self._ds[dsname][u_field].values
v = self._ds[dsname][v_field].values
dim = list(self._ds[dsname][u_field].dims)
xdata = self._ds[dsname][dim[0]].values
num_x = xdata.shape[-1]
barb_step_x = round(num_x / num_barbs_x)
if barb_step_x == 0:
barb_step_x = 1
if len(dim) > 1 and pres_field is None:
if use_var_for_y is None:
ydata = self._ds[dsname][dim[1]]
else:
ydata = self._ds[dsname][use_var_for_y]
ydata_dim1 = self._ds[dsname][dim[1]]
if np.shape(ydata) != np.shape(ydata_dim1):
ydata = ydata_dim1
if 'units' in ydata.attrs:
units = ydata.attrs['units']
else:
units = ''
ytitle = ''.join(['(', units, ')'])
num_y = ydata.shape[0]
barb_step_y = round(num_y / num_barbs_y)
if barb_step_y == 0:
barb_step_y = 1
xdata, ydata = np.meshgrid(xdata, ydata, indexing='ij')
elif pres_field is not None:
# Do a nearest-neighbor interpolation for each member of the series.
# Coordinates are time, pressure.
pres = self._ds[dsname][pres_field]
u_interp = NearestNDInterpolator((xdata, pres.values), u, rescale=True)
v_interp = NearestNDInterpolator((xdata, pres.values), v, rescale=True)
barb_step_x = 1
barb_step_y = 1
x_times = pd.date_range(xdata.min(), xdata.max(), periods=num_barbs_x)
if num_barbs_y == 1:
y_levels = pres.mean()
else:
y_levels = np.linspace(np.nanmin(pres), np.nanmax(pres), num_barbs_y)
xdata, ydata = np.meshgrid(x_times, y_levels, indexing='ij')
u = u_interp(xdata, ydata)
v = v_interp(xdata, ydata)
if 'units' in pres.attrs:
units = pres.attrs['units']
else:
units = ''
ytitle = ''.join(['(', units, ')'])
else:
ydata = None
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
if ydata is None:
ydata = np.ones(xdata.shape)
if 'cmap' in kwargs.keys():
map_color = np.sqrt(np.power(u[::barb_step_x], 2) + np.power(v[::barb_step_x], 2))
map_color[np.isnan(map_color)] = 0
ax = self.axes[subplot_index].barbs(
xdata[::barb_step_x],
ydata[::barb_step_x],
u[::barb_step_x],
v[::barb_step_x],
map_color,
**kwargs,
)
plt.colorbar(
ax,
ax=[self.axes[subplot_index]],
label='Wind Speed (' + self._ds[dsname][u_field].attrs['units'] + ')',
)
else:
self.axes[subplot_index].barbs(
xdata[::barb_step_x],
ydata[::barb_step_x],
u[::barb_step_x],
v[::barb_step_x],
**kwargs,
)
self.axes[subplot_index].set_yticks([])
else:
if 'cmap' in kwargs.keys():
map_color = np.sqrt(
np.power(u[::barb_step_x, ::barb_step_y], 2)
+ np.power(v[::barb_step_x, ::barb_step_y], 2)
)
map_color[np.isnan(map_color)] = 0
ax = self.axes[subplot_index].barbs(
xdata[::barb_step_x, ::barb_step_y],
ydata[::barb_step_x, ::barb_step_y],
u[::barb_step_x, ::barb_step_y],
v[::barb_step_x, ::barb_step_y],
map_color,
**kwargs,
)
plt.colorbar(
ax,
ax=[self.axes[subplot_index]],
label='Wind Speed (' + self._ds[dsname][u_field].attrs['units'] + ')',
)
else:
ax = self.axes[subplot_index].barbs(
xdata[::barb_step_x, ::barb_step_y],
ydata[::barb_step_x, ::barb_step_y],
u[::barb_step_x, ::barb_step_y],
v[::barb_step_x, ::barb_step_y],
**kwargs,
)
if day_night_background is True:
self.day_night_background(subplot_index=subplot_index, dsname=dsname)
# Set Title
if set_title is None:
set_title = ' '.join(
[
dsname,
'on',
dt_utils.numpy_to_arm_date(self._ds[dsname].time.values[0]),
]
)
self.axes[subplot_index].set_title(set_title)
# Set YTitle
if 'ytitle' in locals():
self.axes[subplot_index].set_ylabel(ytitle)
# Set X Limit - We want the same time axes for all subplots
time_rng = [xdata.min(), xdata.max()]
self.set_xrng(time_rng, subplot_index)
# Set Y Limit
if hasattr(self, 'yrng'):
# Make sure that the yrng is not just the default
if not np.all(self.yrng[subplot_index] == 0):
self.set_yrng(self.yrng[subplot_index], subplot_index)
else:
if ydata is None:
our_data = xdata
else:
our_data = ydata
if np.isfinite(our_data).any():
if invert_y_axis is False:
yrng = [np.nanmin(our_data), np.nanmax(our_data)]
else:
yrng = [np.nanmax(our_data), np.nanmin(our_data)]
else:
yrng = [0, 1]
self.set_yrng(yrng, subplot_index)
# Set X Format
if len(subplot_index) == 1:
days = self.xrng[subplot_index, 1] - self.xrng[subplot_index, 0]
else:
days = (
self.xrng[subplot_index[0], subplot_index[1], 1]
- self.xrng[subplot_index[0], subplot_index[1], 0]
)
# Put on an xlabel, but only if we are making the bottom-most plot
if subplot_index[0] == self.axes.shape[0] - 1:
self.axes[subplot_index].set_xlabel('Time [UTC]')
myFmt = common.get_date_format(days)
self.axes[subplot_index].xaxis.set_major_formatter(myFmt)
return self.axes[subplot_index]
def plot_time_height_xsection_from_1d_data(
self,
data_field,
pres_field,
dsname=None,
subplot_index=(0,),
set_title=None,
day_night_background=False,
num_time_periods=20,
num_y_levels=20,
invert_y_axis=True,
cbar_label=None,
set_shading='auto',
**kwargs,
):
"""
This will plot a time-height cross section from 1D datasets using
nearest neighbor interpolation on a regular time by height grid.
All that is needed are a data variable and a height variable.
Parameters
----------
data_field : str
The name of the field to plot.
pres_field : str
The name of the height or pressure field to plot.
dsname : str or None
The name of the datastream to plot
subplot_index : 2-tuple
The index of the subplot to create the plot on.
set_title : str or None
The title of the plot.
day_night_background : bool
Set to true to plot the day/night background.
num_time_periods : int
Set to determine how many time periods. Setting to None
will do one time period per day.
num_y_levels : int
The number of levels in the y axis to use.
invert_y_axis : bool
Set to true to invert the y-axis (recommended for
pressure coordinates).
cbar_label : str
Option to overwrite default colorbar label.
set_shading : string
Option to set the matplotlib.pcolormesh shading parameter.
Defaults to 'auto'.
**kwargs : keyword arguments
Additional keyword arguments will be passed
into :func:`plt.pcolormesh`
Returns
-------
ax : matplotlib axis handle
The matplotlib axis handle pointing to the plot.
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
dim = list(self._ds[dsname][data_field].dims)
if len(dim) > 1:
raise ValueError(
'plot_time_height_xsection_from_1d_data only '
'supports 1-D datasets. For datasets with 2 or '
'more dimensions use plot().'
)
# Get data and dimensions
data = self._ds[dsname][data_field].values
xdata = self._ds[dsname][dim[0]].values
# Do a nearest-neighbor interpolation for each member of the series.
# Coordinates are time, pressure.
pres = self._ds[dsname][pres_field]
u_interp = NearestNDInterpolator((xdata, pres.values), data, rescale=True)
# Mask points where we have no data
# Count number of unique days
x_times = pd.date_range(xdata.min(), xdata.max(), periods=num_time_periods)
y_levels = np.linspace(np.nanmin(pres), np.nanmax(pres), num_y_levels)
tdata, ydata = np.meshgrid(x_times, y_levels, indexing='ij')
data = u_interp(tdata, ydata)
ytitle = ''.join(['(', pres.attrs['units'], ')'])
units = data_field + ' (' + self._ds[dsname][data_field].attrs['units'] + ')'
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
mesh = self.axes[subplot_index].pcolormesh(
x_times, y_levels, np.transpose(data), shading=set_shading, **kwargs
)
if day_night_background is True:
self.day_night_background(subplot_index=subplot_index, dsname=dsname)
# Set Title
if set_title is None:
set_title = ' '.join(
[
dsname,
'on',
dt_utils.numpy_to_arm_date(self._ds[dsname].time.values[0]),
]
)
self.axes[subplot_index].set_title(set_title)
# Set YTitle
if 'ytitle' in locals():
self.axes[subplot_index].set_ylabel(ytitle)
# Set X Limit - We want the same time axes for all subplots
time_rng = [x_times[0], x_times[-1]]
self.set_xrng(time_rng, subplot_index)
# Set Y Limit
if hasattr(self, 'yrng'):
# Make sure that the yrng is not just the default
if not np.all(self.yrng[subplot_index] == 0):
self.set_yrng(self.yrng[subplot_index], subplot_index)
else:
if ydata is None:
our_data = data.values
else:
our_data = ydata
if np.isfinite(our_data).any():
if invert_y_axis is False:
yrng = [np.nanmin(our_data), np.nanmax(our_data)]
else:
yrng = [np.nanmax(our_data), np.nanmin(our_data)]
else:
yrng = [0, 1]
self.set_yrng(yrng, subplot_index)
# Set X Format
if len(subplot_index) == 1:
days = self.xrng[subplot_index, 1] - self.xrng[subplot_index, 0]
else:
days = (
self.xrng[subplot_index[0], subplot_index[1], 1]
- self.xrng[subplot_index[0], subplot_index[1], 0]
)
# Put on an xlabel, but only if we are making the bottom-most plot
if subplot_index[0] == self.axes.shape[0] - 1:
self.axes[subplot_index].set_xlabel('Time [UTC]')
if ydata is not None:
if cbar_label is None:
self.add_colorbar(mesh, title=units, subplot_index=subplot_index)
else:
self.add_colorbar(mesh, title=cbar_label, subplot_index=subplot_index)
myFmt = common.get_date_format(days)
self.axes[subplot_index].xaxis.set_major_formatter(myFmt)
return self.axes[subplot_index]
def time_height_scatter(
self,
data_field=None,
dsname=None,
cmap='rainbow',
alt_label=None,
alt_field='alt',
cb_label=None,
**kwargs,
):
"""
Create a time series plot of altitude and a data variable, with
color also indicating the data value via a color bar. The color bar is
positioned to serve both as the indicator of the color intensity
and as the second y-axis.
Parameters
----------
data_field : str
Name of data field in the dataset to plot on second y-axis.
dsname : str or None
The name of the datastream to plot.
cmap : str
Colorbar color map to use.
alt_label : str
Altitude first y-axis label to use. If None, will try to use
long_name and units.
alt_field : str
Name of the altitude/height field in the dataset to plot on the first y-axis.
cb_label : str
Colorbar label to use. If not set will try to use
long_name and units.
**kwargs : keyword arguments
Any other keyword arguments that will be passed
into TimeSeriesDisplay.plot module when the figure
is made.
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
# Get data and dimensions
data = self._ds[dsname][data_field]
altitude = self._ds[dsname][alt_field]
dim = list(self._ds[dsname][data_field].dims)
xdata = self._ds[dsname][dim[0]]
if alt_label is None:
try:
alt_label = altitude.attrs['long_name'] + ''.join(
[' (', altitude.attrs['units'], ')']
)
except KeyError:
alt_label = alt_field
if cb_label is None:
try:
cb_label = data.attrs['long_name'] + ''.join([' (', data.attrs['units'], ')'])
except KeyError:
cb_label = data_field
colorbar_map = plt.cm.get_cmap(cmap)
self.fig.subplots_adjust(left=0.1, right=0.86, bottom=0.16, top=0.91)
ax1 = self.plot(alt_field, color='black', **kwargs)
ax1.set_ylabel(alt_label)
ax2 = ax1.twinx()
sc = ax2.scatter(xdata.values, data.values, c=data.values, marker='.', cmap=colorbar_map)
cbaxes = self.fig.add_axes(
[
self.fig.subplotpars.right + 0.02,
self.fig.subplotpars.bottom,
0.02,
self.fig.subplotpars.top - self.fig.subplotpars.bottom,
]
)
cbar = plt.colorbar(sc, cax=cbaxes)
ax2.set_ylim(cbar.mappable.get_clim())
cbar.ax.set_ylabel(cb_label)
ax2.set_yticklabels([])
return self.axes[0]
def qc_flag_block_plot(
self,
data_field=None,
dsname=None,
subplot_index=(0,),
time_rng=None,
assessment_color=None,
edgecolor='face',
set_shading='auto',
cb_friendly=False,
**kwargs,
):
"""
Create a time series plot of embedded quality control values
using broken barh plotting.
Parameters
----------
data_field : str
Name of data field in the dataset to plot corresponding quality
control.
dsname : None or str
If there is more than one datastream in the display object the
name of the datastream needs to be specified. If set to None and
there is only one datastream ACT will use the sole datastream
in the object.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
time_rng : tuple or list
List or tuple with (min, max) values to set the x-axis range limits.
assessment_color : dict
Dictionary lookup to override the default assessment-to-color mapping. Make
sure the assessment names use the correct case.
edgecolor : str or list
Color name, list of color names or 'face' as defined in matplotlib.axes.Axes.broken_barh
set_shading : string
Option to set the matplotlib.pcolormesh shading parameter.
Defaults to 'auto'.
cb_friendly : boolean
Set to true if you want to use the integrated colorblind friendly
colors for green/red based on the Homeyer colormap
**kwargs : keyword arguments
The keyword arguments for :func:`plt.broken_barh`.
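Examples
--------
A minimal sketch; the file list and the 'temp_mean' field name are only
placeholders for a dataset that carries an embedded quality control variable.
.. code-block:: python
ds = act.io.armfiles.read_netcdf(files)
display = act.plotting.TimeSeriesDisplay(ds)
display.qc_flag_block_plot('temp_mean', subplot_index=(0,))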
"""
# Color to plot associated with assessment.
color_lookup = {
'Bad': 'red',
'Incorrect': 'red',
'Indeterminate': 'orange',
'Suspect': 'orange',
'Missing': 'darkgray',
'Not Failing': 'green',
'Acceptable': 'green',
}
if cb_friendly:
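# Colorblind-friendly red/teal substitutes based on the Homeyer colormap
# (see the cb_friendly keyword description above).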
color_lookup['Bad'] = (0.9285714285714286, 0.7130901016453677, 0.7130901016453677)
color_lookup['Incorrect'] = (0.9285714285714286, 0.7130901016453677, 0.7130901016453677)
color_lookup['Not Failing'] = (0.0, 0.4240129715562796, 0.4240129715562796)
color_lookup['Acceptable'] = (0.0, 0.4240129715562796, 0.4240129715562796)
if assessment_color is not None:
for asses, color in assessment_color.items():
color_lookup[asses] = color
if asses == 'Incorrect':
color_lookup['Bad'] = color
if asses == 'Suspect':
color_lookup['Indeterminate'] = color
# Set up list of test names to use for missing values
missing_val_long_names = [
'Value equal to missing_value*',
'Value set to missing_value*',
'Value is equal to missing_value*',
'Value is set to missing_value*',
]
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
# Set up or get current plot figure
if self.fig is None:
self.fig = plt.figure()
# Set up or get current axes
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
ax = self.axes[subplot_index]
# Set X Limit - We want the same time axes for all subplots
data = self._ds[dsname][data_field]
dim = list(self._ds[dsname][data_field].dims)
xdata = self._ds[dsname][dim[0]]
# Get data and attributes
qc_data_field = self._ds[dsname].qcfilter.check_for_ancillary_qc(
data_field, add_if_missing=False, cleanup=False
)
if qc_data_field is None:
raise ValueError(f'No quality control ancillary variable in Dataset for {data_field}')
flag_masks = self._ds[dsname][qc_data_field].attrs['flag_masks']
flag_meanings = self._ds[dsname][qc_data_field].attrs['flag_meanings']
flag_assessments = self._ds[dsname][qc_data_field].attrs['flag_assessments']
# Get time ranges for green blocks
time_delta = determine_time_delta(xdata.values)
barh_list_green = reduce_time_ranges(xdata.values, time_delta=time_delta, broken_barh=True)
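# barh_list_green holds (xmin, xwidth) spans of available data, the form
# ax.broken_barh() expects when the green blocks are drawn further below.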
# Set background to gray indicating not available data
ax.set_facecolor('dimgray')
# Check if plotting 2D data vs 1D data. 2D data will be summarized by
# assessment category instead of showing each test.
data_shape = self._ds[dsname][qc_data_field].shape
if len(data_shape) > 1:
cur_assessments = list(set(flag_assessments))
cur_assessments.sort()
cur_assessments.reverse()
qc_data = np.full(data_shape, -1, dtype=np.int16)
plot_colors = []
tick_names = []
index = self._ds[dsname][qc_data_field].values == 0
if index.any():
qc_data[index] = 0
plot_colors.append(color_lookup['Not Failing'])
tick_names.append('Not Failing')
for ii, assess in enumerate(cur_assessments):
if assess not in color_lookup:
color_lookup[assess] = list(mplcolors.CSS4_COLORS.keys())[ii]
ii += 1
assess_data = self._ds[dsname].qcfilter.get_masked_data(
data_field, rm_assessments=assess
)
if assess_data.mask.any():
qc_data[assess_data.mask] = ii
plot_colors.append(color_lookup[assess])
tick_names.append(assess)
# Overwrite missing data. Not sure if we want to do this because VAPs set
# the value to missing but the test is set to Bad. This tries to overcome that
# by looking for correct test description that would only indicate the values
# are missing not that they are set to missing by a test... most likely.
missing_test_nums = []
for ii, flag_meaning in enumerate(flag_meanings):
# Check if the bit set is indicating missing data.
for val in missing_val_long_names:
if re_search(val, flag_meaning):
test_num = parse_bit(flag_masks[ii])[0]
missing_test_nums.append(test_num)
assess_data = self._ds[dsname].qcfilter.get_masked_data(
data_field, rm_tests=missing_test_nums
)
if assess_data.mask.any():
qc_data[assess_data.mask] = -1
plot_colors.append(color_lookup['Missing'])
tick_names.append('Missing')
# Create a masked array to allow not plotting where values are missing
qc_data = np.ma.masked_equal(qc_data, -1)
dims = self._ds[dsname][qc_data_field].dims
xvalues = self._ds[dsname][dims[0]].values
yvalues = self._ds[dsname][dims[1]].values
cMap = mplcolors.ListedColormap(plot_colors)
mesh = ax.pcolormesh(
xvalues,
yvalues,
np.transpose(qc_data),
cmap=cMap,
vmin=0,
shading=set_shading,
)
divider = make_axes_locatable(ax)
# Determine correct placement of words on colorbar
tick_nums = (
np.arange(0, len(tick_names) * 2 + 1) / (len(tick_names) * 2) * np.nanmax(qc_data)
)[1::2]
cax = divider.append_axes('bottom', size='5%', pad=0.3)
cbar = self.fig.colorbar(
mesh,
cax=cax,
orientation='horizontal',
spacing='uniform',
ticks=tick_nums,
shrink=0.5,
)
cbar.ax.set_xticklabels(tick_names)
# Set YTitle
dim_name = list(set(self._ds[dsname][qc_data_field].dims) - {'time'})
try:
ytitle = f"{dim_name[0]} ({self._ds[dsname][dim_name[0]].attrs['units']})"
ax.set_ylabel(ytitle)
except KeyError:
pass
# Add which tests were set as text to the plot
unique_values = []
for ii in np.unique(self._ds[dsname][qc_data_field].values):
unique_values.extend(parse_bit(ii))
if len(unique_values) > 0:
unique_values = list(set(unique_values))
unique_values.sort()
unique_values = [str(ii) for ii in unique_values]
self.fig.text(
0.5,
-0.35,
f"QC Tests Tripped: {', '.join(unique_values)}",
transform=ax.transAxes,
horizontalalignment='center',
verticalalignment='center',
fontweight='bold',
)
else:
test_nums = []
for ii, assess in enumerate(flag_assessments):
if assess not in color_lookup:
color_lookup[assess] = list(mplcolors.CSS4_COLORS.keys())[ii]
# Plot green data first.
ax.broken_barh(
barh_list_green,
(ii, ii + 1),
facecolors=color_lookup['Not Failing'],
edgecolor=edgecolor,
**kwargs,
)
# Get test number from flag_mask bitpacked number
test_nums.append(parse_bit(flag_masks[ii]))
# Get masked array data to use mask for finding if/where test is set
data = self._ds[dsname].qcfilter.get_masked_data(
data_field, rm_tests=test_nums[-1]
)
if np.any(data.mask):
# Get time ranges from time and masked data
barh_list = reduce_time_ranges(
xdata.values[data.mask], time_delta=time_delta, broken_barh=True
)
# Check if the bit set is indicating missing data. If so change
# to different plotting color than what is in flag_assessments.
for val in missing_val_long_names:
if re_search(val, flag_meanings[ii]):
assess = 'Missing'
break
# Lay down blocks of tripped tests using correct color
ax.broken_barh(
barh_list,
(ii, ii + 1),
facecolors=color_lookup[assess],
edgecolor=edgecolor,
**kwargs,
)
# Add test description to plot.
ax.text(xdata.values[0], ii + 0.5, ' ' + flag_meanings[ii], va='center')
# Change y ticks to test number
plt.yticks(
[ii + 0.5 for ii in range(0, len(test_nums))],
labels=['Test ' + str(ii[0]) for ii in test_nums],
)
# Set ylimit to number of tests plotted
ax.set_ylim(0, len(flag_assessments))
# Set X Limit - We want the same time axes for all subplots
if not hasattr(self, 'time_rng'):
if time_rng is not None:
self.time_rng = list(time_rng)
else:
self.time_rng = [xdata.min().values, xdata.max().values]
self.set_xrng(self.time_rng, subplot_index)
# Get X format - We want the same time axes for all subplots
if hasattr(self, 'time_fmt'):
ax.xaxis.set_major_formatter(self.time_fmt)
else:
# Set X Format
if len(subplot_index) == 1:
days = self.xrng[subplot_index, 1] - self.xrng[subplot_index, 0]
else:
days = (
self.xrng[subplot_index[0], subplot_index[1], 1]
- self.xrng[subplot_index[0], subplot_index[1], 0]
)
myFmt = common.get_date_format(days)
ax.xaxis.set_major_formatter(myFmt)
self.time_fmt = myFmt
return self.axes[subplot_index]
def fill_between(
self,
field,
dsname=None,
subplot_index=(0,),
set_title=None,
secondary_y=False,
**kwargs,
):
"""
Makes a fill_between plot, based on matplotlib
Parameters
----------
field : str
The name of the field to plot.
dsname : None or str
If there is more than one datastream in the display object the
name of the datastream needs to be specified. If set to None and
there is only one datastream ACT will use the sole datastream
in the object.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
set_title : str
The title for the plot.
secondary_y : boolean
Option to indicate if the data should be plotted on second y-axis.
**kwargs : keyword arguments
The keyword arguments for :func:`plt.plot` (1D timeseries) or
:func:`plt.pcolormesh` (2D timeseries).
Returns
-------
ax : matplotlib axis handle
The matplotlib axis handle of the plot.
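Examples
--------
A minimal sketch; ds and 'precip_cumulative' are only placeholders.
.. code-block:: python
display = act.plotting.TimeSeriesDisplay(ds)
display.fill_between('precip_cumulative', subplot_index=(0,), alpha=0.5)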
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
# Get data and dimensions
data = self._ds[dsname][field]
dim = list(self._ds[dsname][field].dims)
xdata = self._ds[dsname][dim[0]]
if 'units' in data.attrs:
ytitle = ''.join(['(', data.attrs['units'], ')'])
else:
ytitle = field
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
# Set ax to appropriate axis
if secondary_y is False:
ax = self.axes[subplot_index]
else:
ax = self.axes[subplot_index].twinx()
ax.fill_between(xdata.values, data, **kwargs)
# Set X Format
if len(subplot_index) == 1:
days = self.xrng[subplot_index, 1] - self.xrng[subplot_index, 0]
else:
days = (
self.xrng[subplot_index[0], subplot_index[1], 1]
- self.xrng[subplot_index[0], subplot_index[1], 0]
)
myFmt = common.get_date_format(days)
ax.xaxis.set_major_formatter(myFmt)
# Set X format - We want the same time axes for all subplots
if not hasattr(self, 'time_fmt'):
self.time_fmt = myFmt
# Put on an xlabel, but only if we are making the bottom-most plot
if subplot_index[0] == self.axes.shape[0] - 1:
self.axes[subplot_index].set_xlabel('Time [UTC]')
# Set YTitle
ax.set_ylabel(ytitle)
# Set Title
if set_title is None:
set_title = ' '.join(
[
dsname,
field,
'on',
dt_utils.numpy_to_arm_date(self._ds[dsname].time.values[0]),
]
)
if secondary_y is False:
ax.set_title(set_title)
return self.axes[subplot_index]
<|code_end|>
|
act/plotting/timeseriesdisplay.py
<|code_start|>"""
Stores the class for TimeSeriesDisplay.
"""
import datetime as dt
import textwrap
import warnings
from copy import deepcopy
from re import search, search as re_search
import matplotlib as mpl
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from matplotlib import colors as mplcolors
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scipy.interpolate import NearestNDInterpolator
from ..qc.qcfilter import parse_bit
from ..utils import data_utils, datetime_utils as dt_utils
from ..utils.datetime_utils import determine_time_delta, reduce_time_ranges
from ..utils.geo_utils import get_sunrise_sunset_noon
from . import common
from .plot import Display
class TimeSeriesDisplay(Display):
"""
This subclass contains routines that are specific to plotting
time series plots from data. It is inherited from Display and therefore
contains all of Display's attributes and methods.
Examples
--------
To create a TimeSeriesDisplay with 3 rows, simply do:
.. code-block:: python
ds = act.read_netcdf(the_file)
disp = act.plotting.TimeSeriesDisplay(ds, subplot_shape=(3,), figsize=(15, 5))
The TimeSeriesDisplay constructor takes in the same keyword arguments as
plt.subplots. For more information on the plt.subplots keyword arguments,
see the `matplotlib documentation
<https://matplotlib.org/api/_as_gen/matplotlib.pyplot.subplots.html>`_.
If no subplot_shape is provided, then no figure or axis will be created
until add_subplots or plots is called.
"""
def __init__(self, ds, subplot_shape=(1,), ds_name=None, **kwargs):
super().__init__(ds, subplot_shape, ds_name, **kwargs)
def day_night_background(self, dsname=None, subplot_index=(0,)):
"""
Colorcodes the background according to sunrise/sunset.
Parameters
----------
dsname : None or str
If there is more than one datastream in the display object the
name of the datastream needs to be specified. If set to None and
there is only one datastream then ACT will use the sole datastream
in the object.
subplot_index : 1 or 2D tuple, list, or array
The index to the subplot to place the day and night background in.
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream to derive the '
+ 'information needed for the day and night '
+ 'background when 2 or more datasets are in '
+ 'the display object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
# Get File Dates
try:
file_dates = self._ds[dsname].attrs['_file_dates']
except KeyError:
file_dates = []
if len(file_dates) == 0:
sdate = dt_utils.numpy_to_arm_date(self._ds[dsname].time.values[0])
edate = dt_utils.numpy_to_arm_date(self._ds[dsname].time.values[-1])
file_dates = [sdate, edate]
all_dates = dt_utils.dates_between(file_dates[0], file_dates[-1])
if self.axes is None:
raise RuntimeError('day_night_background requires the plot to ' 'be displayed.')
ax = self.axes[subplot_index]
# Find variable names for latitude and longitude
variables = list(self._ds[dsname].data_vars)
lat_name = [var for var in ['lat', 'latitude'] if var in variables]
lon_name = [var for var in ['lon', 'longitude'] if var in variables]
if len(lat_name) == 0:
lat_name = None
else:
lat_name = lat_name[0]
if len(lon_name) == 0:
lon_name = None
else:
lon_name = lon_name[0]
# Variable name does not match, look for standard_name declaration
if lat_name is None or lon_name is None:
for var in variables:
try:
if self._ds[dsname][var].attrs['standard_name'] == 'latitude':
lat_name = var
except KeyError:
pass
try:
if self._ds[dsname][var].attrs['standard_name'] == 'longitude':
lon_name = var
except KeyError:
pass
if lat_name is not None and lon_name is not None:
break
if lat_name is None or lon_name is None:
return
# Extract latitude and longitude scalar from variable. If variable is a vector look
# for first non-Nan value.
lat_lon_list = [np.nan, np.nan]
for ii, var_name in enumerate([lat_name, lon_name]):
try:
values = self._ds[dsname][var_name].values
if values.size == 1:
lat_lon_list[ii] = float(values)
else:
# Look for non-NaN values to use for latitude location. If not found use first value.
index = np.where(np.isfinite(values))[0]
if index.size == 0:
lat_lon_list[ii] = float(values[0])
else:
lat_lon_list[ii] = float(values[index[0]])
except AttributeError:
pass
for value, name in zip(lat_lon_list, ['Latitude', 'Longitude']):
if not np.isfinite(value):
warnings.warn(f"{name} value in dataset equal to '{value}' is not finite. ", RuntimeWarning)
return
lat = lat_lon_list[0]
lon = lat_lon_list[1]
lat_range = [-90, 90]
if not (lat_range[0] <= lat <= lat_range[1]):
warnings.warn(
f"Latitude value in dataset of '{lat}' not within acceptable "
f'range of {lat_range[0]} <= latitude <= {lat_range[1]}. ',
RuntimeWarning,
)
return
lon_range = [-180, 180]
if not (lon_range[0] <= lon <= lon_range[1]):
warnings.warn(
f"Longitude value in dataset of '{lon}' not within acceptable "
f'range of {lon_range[0]} <= longitude <= {lon_range[1]}. ',
RuntimeWarning,
)
return
# Initialize the plot to a gray background for total darkness
rect = ax.patch
rect.set_facecolor('0.85')
# Get date ranges to plot
plot_dates = []
for f in all_dates:
for ii in [-1, 0, 1]:
plot_dates.append(f + dt.timedelta(days=ii))
# Get sunrise, sunset and noon times
sunrise, sunset, noon = get_sunrise_sunset_noon(lat, lon, plot_dates)
# Plot daylight
for ii in range(0, len(sunrise)):
ax.axvspan(sunrise[ii], sunset[ii], facecolor='#FFFFCC', zorder=0)
# Plot noon line
for ii in noon:
ax.axvline(x=ii, linestyle='--', color='y', zorder=1)
def set_xrng(self, xrng, subplot_index=(0,)):
"""
Sets the x range of the plot.
Parameters
----------
xrng : 2 number array
The x limits of the plot.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
"""
if self.axes is None:
raise RuntimeError('set_xrng requires the plot to be displayed.')
# If the xlim range is set to a single value matplotlib will throw a warning.
# Catch that here and expand the range to avoid the warning.
if xrng[0] == xrng[1]:
if isinstance(xrng[0], np.datetime64):
print(f'\nAttempting to set xlim range to single value {xrng[0]}. '
'Expanding range by 2 seconds.\n')
xrng[0] -= np.timedelta64(1, 's')
xrng[1] += np.timedelta64(1, 's')
elif isinstance(xrng[0], dt.datetime):
print(f'\nAttempting to set xlim range to single value {xrng[0]}. '
'Expanding range by 2 seconds.\n')
xrng[0] -= dt.timedelta(seconds=1)
xrng[1] += dt.timedelta(seconds=1)
self.axes[subplot_index].set_xlim(xrng)
# Make sure that the xrng value is a numpy array not pandas
if isinstance(xrng[0], pd.Timestamp):
xrng = [x.to_numpy() for x in xrng if isinstance(x, pd.Timestamp)]
# Make sure that the xrng value is a numpy array not datetime.datetime
if isinstance(xrng[0], dt.datetime):
xrng = [np.datetime64(x) for x in xrng if isinstance(x, dt.datetime)]
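# Store the limits as float day numbers (datetime64[D] cast to float) so a
# later difference of the two values gives the span in days that is passed
# to common.get_date_format().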
if len(subplot_index) < 2:
self.xrng[subplot_index, 0] = xrng[0].astype('datetime64[D]').astype(float)
self.xrng[subplot_index, 1] = xrng[1].astype('datetime64[D]').astype(float)
else:
self.xrng[subplot_index][0] = xrng[0].astype('datetime64[D]').astype(float)
self.xrng[subplot_index][1] = xrng[1].astype('datetime64[D]').astype(float)
def set_yrng(self, yrng, subplot_index=(0,), match_axes_ylimits=False):
"""
Sets the y range of the plot.
Parameters
----------
yrng : 2 number array
The y limits of the plot.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the y range of. This is
ignored if match_axes_ylimits is True.
match_axes_ylimits : boolean
If True, all axes in the display object will have matching
provided ylims. Default is False. This is especially useful
when utilizing a groupby display with many axes.
"""
if self.axes is None:
raise RuntimeError('set_yrng requires the plot to be displayed.')
if not hasattr(self, 'yrng') and len(self.axes.shape) == 2:
self.yrng = np.zeros((self.axes.shape[0], self.axes.shape[1], 2))
elif not hasattr(self, 'yrng') and len(self.axes.shape) == 1:
self.yrng = np.zeros((self.axes.shape[0], 2))
if yrng[0] == yrng[1]:
yrng[1] = yrng[1] + 1
# Sets all axes ylims to the same values.
if match_axes_ylimits:
for i in range(self.axes.shape[0]):
for j in range(self.axes.shape[1]):
self.axes[i, j].set_ylim(yrng)
else:
self.axes[subplot_index].set_ylim(yrng)
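# Record the new limits; the tuple-plus-slice indexing does not work for
# every subplot_index / yrng shape combination, hence the fallback below.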
try:
self.yrng[subplot_index, :] = yrng
except IndexError:
self.yrng[subplot_index] = yrng
def plot(
self,
field,
dsname=None,
subplot_index=(0,),
cmap=None,
set_title=None,
add_nan=False,
day_night_background=False,
invert_y_axis=False,
abs_limits=(None, None),
time_rng=None,
y_rng=None,
use_var_for_y=None,
set_shading='auto',
assessment_overplot=False,
overplot_marker='.',
overplot_behind=False,
overplot_markersize=6,
assessment_overplot_category={
'Incorrect': ['Bad', 'Incorrect'],
'Suspect': ['Indeterminate', 'Suspect'],
},
assessment_overplot_category_color={'Incorrect': 'red', 'Suspect': 'orange'},
force_line_plot=False,
labels=False,
cbar_label=None,
cbar_h_adjust=None,
secondary_y=False,
y_axis_flag_meanings=False,
colorbar_labels=None,
cb_friendly=False,
match_line_label_color=False,
**kwargs,
):
"""
Makes a timeseries plot. If subplots have not been added yet, an axis
will be created assuming that there is only going to be one plot.
If plotting a high data volume 2D dataset, it may take some time to plot.
In order to speed up your plot creation, please resample your data to a
lower resolution dataset.
Parameters
----------
field : str
The name of the field to plot.
dsname : None or str
If there is more than one datastream in the display object the
name of the datastream needs to be specified. If set to None and
there is only one datastream ACT will use the sole datastream
in the object.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
cmap : matplotlib colormap
The colormap to use.
set_title : str
The title for the plot.
add_nan : bool
Set to True to fill in data gaps with NaNs.
day_night_background : bool
Set to True to fill in a color coded background
according to the time of day.
abs_limits : tuple or list
Sets the bounds on plot limits even if data values exceed
those limits. Set to (ymin,ymax). Use None if only setting
minimum or maximum limit, i.e. (22., None).
time_rng : tuple or list
List or tuple with (min, max) values to set the x-axis range
limits.
y_rng : tuple or list
List or tuple with (min, max) values to set the y-axis range
use_var_for_y : str
Set this to the name of a data variable in the Dataset to use as
the y-axis variable instead of the default dimension. Useful for
instances where data has an index-based dimension instead of a
height-based dimension. If shapes of arrays do not match it will
automatically revert back to the original ydata.
set_shading : string
Option to set the matplotlib.pcolormesh shading parameter.
Defaults to 'auto'.
assessment_overplot : boolean
Option to overplot quality control colored symbols over plotted
data using flag_assessment categories.
overplot_marker : str
Marker to use for overplot symbol.
overplot_behind : bool
Place the overplot marker behind the data point.
overplot_markersize : float or int
Size of overplot marker. If overplot_behind or force_line_plot
are set the marker size will be double overplot_markersize so
the color is visible.
assessment_overplot_category : dict
Lookup to categorize assessments into groups. This allows using
multiple terms for the same quality control level of failure.
Also allows adding more to the defaults.
assessment_overplot_category_color : dict
Lookup to match overplot category color to assessment grouping.
force_line_plot : boolean
Option to plot 2D data as 1D line plots.
labels : boolean or list
Option to overwrite the legend labels. Must have same dimensions as
number of lines plotted.
cbar_label : str
Option to overwrite default colorbar label.
cbar_h_adjust : float
Option to adjust the location of the colorbar horizontally. Positive values
move it to the right, negative values move it to the left.
secondary_y : boolean
Option to plot on secondary y axis.
y_axis_flag_meanings : boolean or int
When set to True and plotting a state variable with flag_values and
flag_meanings attributes, the numerical y-axis values are replaced
with the flag_meanings values. Set to a positive number larger than 1
to indicate the maximum word length to use. If the text is longer than
that value and contains spaces, it will be wrapped over multiple lines.
colorbar_labels : dict
A dictionary containing values for plotting a 2D array of state variables.
The dictionary uses data values as keys and a dictionary containing keys
'text' and 'color' for each data value to plot.
Example:
{0: {'text': 'Clear sky', 'color': 'white'},
1: {'text': 'Liquid', 'color': 'green'},
2: {'text': 'Ice', 'color': 'blue'},
3: {'text': 'Mixed phase', 'color': 'purple'}}
cb_friendly : boolean
Set to true if you want to use the integrated colorblind friendly
colors for green/red based on the Homeyer colormap.
match_line_label_color : boolean
Will set the y label to match the line color in the plot. This
will only work if the time series plot is a line plot.
**kwargs : keyword arguments
The keyword arguments for :func:`plt.plot` (1D timeseries) or
:func:`plt.pcolormesh` (2D timeseries).
Returns
-------
ax : matplotlib axis handle
The matplotlib axis handle of the plot.
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
if y_axis_flag_meanings:
kwargs['linestyle'] = ''
if cb_friendly:
cmap = 'act_HomeyerRainbow'
assessment_overplot_category_color['Bad'] = (0.9285714285714286, 0.7130901016453677, 0.7130901016453677)
assessment_overplot_category_color['Incorrect'] = (0.9285714285714286, 0.7130901016453677, 0.7130901016453677)
assessment_overplot_category_color['Not Failing'] = (0.0, 0.4240129715562796, 0.4240129715562796)
assessment_overplot_category_color['Acceptable'] = (0.0, 0.4240129715562796, 0.4240129715562796)
# Get data and dimensions
data = self._ds[dsname][field]
dim = list(self._ds[dsname][field].dims)
xdata = self._ds[dsname][dim[0]]
if 'units' in data.attrs:
ytitle = ''.join(['(', data.attrs['units'], ')'])
else:
ytitle = field
if cbar_label is None:
cbar_default = ytitle
if len(dim) > 1:
if use_var_for_y is None:
ydata = self._ds[dsname][dim[1]]
else:
ydata = self._ds[dsname][use_var_for_y]
ydata_dim1 = self._ds[dsname][dim[1]]
if np.shape(ydata) != np.shape(ydata_dim1):
ydata = ydata_dim1
units = ytitle
if 'units' in ydata.attrs.keys():
units = ydata.attrs['units']
ytitle = ''.join(['(', units, ')'])
else:
units = ''
ytitle = dim[1]
# Create labels if 2d as 1d
if force_line_plot is True:
if labels is True:
labels = [' '.join([str(d), units]) for d in ydata.values]
ytitle = f"({data.attrs['units']})"
ydata = None
else:
ydata = None
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
# Set up secondary y axis if requested
if secondary_y is False:
ax = self.axes[subplot_index]
else:
ax = self.axes[subplot_index].twinx()
if colorbar_labels is not None:
flag_values = list(colorbar_labels.keys())
flag_meanings = [value['text'] for key, value in colorbar_labels.items()]
cbar_colors = [value['color'] for key, value in colorbar_labels.items()]
cmap = mpl.colors.ListedColormap(cbar_colors)
for ii, flag_meaning in enumerate(flag_meanings):
if len(flag_meaning) > 20:
flag_meaning = textwrap.fill(flag_meaning, width=20)
flag_meanings[ii] = flag_meaning
else:
flag_values = None
flag_meanings = None
cbar_colors = None
if ydata is None:
# Add in NaNs so the plotted line does not connect across data gaps.
if add_nan is True:
xdata, data = data_utils.add_in_nan(xdata, data)
if day_night_background is True:
self.day_night_background(subplot_index=subplot_index, dsname=dsname)
# If limiting data being plotted use masked arrays
# Need to do it this way because of autoscale() method
if abs_limits[0] is not None and abs_limits[1] is not None:
data = np.ma.masked_outside(data, abs_limits[0], abs_limits[1])
elif abs_limits[0] is not None and abs_limits[1] is None:
data = np.ma.masked_less_equal(data, abs_limits[0])
elif abs_limits[0] is None and abs_limits[1] is not None:
data = np.ma.masked_greater_equal(data, abs_limits[1])
# Plot the data
if 'marker' not in kwargs.keys():
kwargs['marker'] = '.'
lines = ax.plot(xdata, data, **kwargs)
# Check if we need to call legend method after plotting. This is only
# called when no assessment overplot is called.
add_legend = False
if 'label' in kwargs.keys():
add_legend = True
# Overplot failing data if requested
if assessment_overplot:
# If we are doing a forced line plot from 2D data we need to manage
# legend labels. Make lists to hold the lines and labels of QC-failing
# data because they are not created when the labels keyword is not set.
if not isinstance(labels, list) and add_legend is False:
labels = []
lines = []
# For forced line plot need to plot QC behind point instead of
# on top of point.
zorder = None
if force_line_plot or overplot_behind:
zorder = 0
overplot_markersize *= 2.0
for assessment, categories in assessment_overplot_category.items():
flag_data = self._ds[dsname].qcfilter.get_masked_data(
field, rm_assessments=categories, return_inverse=True
)
if np.invert(flag_data.mask).any() and np.isfinite(flag_data).any():
try:
flag_data.mask = np.logical_or(data.mask, flag_data.mask)
except AttributeError:
pass
qc_ax = ax.plot(
xdata,
flag_data,
marker=overplot_marker,
linestyle='',
markersize=overplot_markersize,
color=assessment_overplot_category_color[assessment],
label=assessment,
zorder=zorder,
)
# If labels keyword is set need to add labels for calling legend
if isinstance(labels, list):
# If plotting forced_line_plot need to subset the Line2D object
# so we don't have more than one added to legend.
if len(qc_ax) > 1:
lines.extend(qc_ax[:1])
else:
lines.extend(qc_ax)
labels.append(assessment)
add_legend = True
# Add legend if labels are available
if isinstance(labels, list):
ax.legend(lines, labels)
elif add_legend:
ax.legend()
# Change y axis to text from flag_meanings if requested.
if y_axis_flag_meanings:
flag_meanings = self._ds[dsname][field].attrs['flag_meanings']
flag_values = self._ds[dsname][field].attrs['flag_values']
# If keyword is larger than 1 assume this is the maximum character length
# desired and insert returns to wrap text.
if y_axis_flag_meanings > 1:
for ii, flag_meaning in enumerate(flag_meanings):
if len(flag_meaning) > y_axis_flag_meanings:
flag_meaning = textwrap.fill(flag_meaning, width=y_axis_flag_meanings)
flag_meanings[ii] = flag_meaning
ax.set_yticks(flag_values)
ax.set_yticklabels(flag_meanings)
else:
# Add in nans to ensure the data are not streaking
if add_nan is True:
xdata, data = data_utils.add_in_nan(xdata, data)
# Sets shading parameter to auto. Matplotlib will check dimensions.
# If X, Y and C are the same dimensions shading is set to nearest.
# If X and Y dimensions are 1 greater than C shading is set to flat.
if 'edgecolors' not in kwargs.keys():
kwargs['edgecolors'] = 'face'
mesh = ax.pcolormesh(
np.asarray(xdata),
ydata,
data.transpose(),
shading=set_shading,
cmap=cmap,
**kwargs,
)
# Set Title
if set_title is None:
if isinstance(self._ds[dsname].time.values[0], np.datetime64):
set_title = ' '.join(
[
dsname,
field,
'on',
dt_utils.numpy_to_arm_date(self._ds[dsname].time.values[0]),
]
)
else:
date_result = search(
r'\d{4}-\d{1,2}-\d{1,2}', self._ds[dsname].time.attrs['units']
)
if date_result is not None:
set_title = ' '.join([dsname, field, 'on', date_result.group(0)])
else:
set_title = ' '.join([dsname, field])
if secondary_y is False:
ax.set_title(set_title)
# Set YTitle
if not y_axis_flag_meanings:
if match_line_label_color and len(ax.get_lines()) > 0:
ax.set_ylabel(ytitle, color=ax.get_lines()[0].get_color())
else:
ax.set_ylabel(ytitle)
# Set X Limit - We want the same time axes for all subplots
if not hasattr(self, 'time_rng'):
if time_rng is not None:
self.time_rng = list(time_rng)
else:
self.time_rng = [xdata.min().values, xdata.max().values]
self.set_xrng(self.time_rng, subplot_index)
# Set Y Limit
if y_rng is not None:
self.set_yrng(y_rng)
if hasattr(self, 'yrng'):
# Make sure that the yrng is not just the default
if ydata is None:
if abs_limits[0] is not None or abs_limits[1] is not None:
our_data = data
else:
our_data = data.values
else:
our_data = ydata
finite = np.isfinite(our_data)
if finite.any():
our_data = our_data[finite]
if invert_y_axis is False:
yrng = [np.min(our_data), np.max(our_data)]
else:
yrng = [np.max(our_data), np.min(our_data)]
else:
yrng = [0, 1]
# Check if current range is outside of new range and only set
# values that work for all data plotted.
if isinstance(yrng[0], np.datetime64):
yrng = mdates.datestr2num([str(yrng[0]), str(yrng[1])])
current_yrng = ax.get_ylim()
if invert_y_axis is False:
if yrng[0] > current_yrng[0]:
yrng[0] = current_yrng[0]
if yrng[1] < current_yrng[1]:
yrng[1] = current_yrng[1]
else:
if yrng[0] < current_yrng[0]:
yrng[0] = current_yrng[0]
if yrng[1] > current_yrng[1]:
yrng[1] = current_yrng[1]
# Set y range the normal way if not secondary y
# If secondary, just use set_ylim
if secondary_y is False:
self.set_yrng(yrng, subplot_index)
else:
ax.set_ylim(yrng)
# Set X Format
if len(subplot_index) == 1:
days = self.xrng[subplot_index, 1] - self.xrng[subplot_index, 0]
else:
days = self.xrng[subplot_index][1] - self.xrng[subplot_index][0]
myFmt = common.get_date_format(days)
ax.xaxis.set_major_formatter(myFmt)
# Set X format - We want the same time axes for all subplots
if not hasattr(self, 'time_fmt'):
self.time_fmt = myFmt
# Put on an xlabel, but only if we are making the bottom-most plot
if subplot_index[0] == self.axes.shape[0] - 1:
ax.set_xlabel('Time [UTC]')
if ydata is not None:
if cbar_label is None:
cbar_title = cbar_default
else:
cbar_title = ''.join(['(', cbar_label, ')'])
if colorbar_labels is not None:
cbar_title = None
cbar = self.add_colorbar(
mesh,
title=cbar_title,
subplot_index=subplot_index,
values=flag_values,
pad=cbar_h_adjust,
)
cbar.set_ticks(flag_values)
cbar.set_ticklabels(flag_meanings)
cbar.ax.tick_params(labelsize=10)
else:
self.add_colorbar(
mesh, title=cbar_title, subplot_index=subplot_index, pad=cbar_h_adjust
)
return ax
def plot_barbs_from_spd_dir(
self, speed_field, direction_field, pres_field=None, dsname=None, **kwargs
):
"""
This procedure will make a wind barb plot timeseries.
If a pressure field is given and the wind fields are 1D, which, for
example, would occur if one wants to plot a timeseries of
rawinsonde data, then a time-height cross section of
winds will be made.
Note: This procedure calls plot_barbs_from_u_v and will take in the
same keyword arguments as that procedure.
Parameters
----------
speed_field : str
The name of the field specifying the wind speed in m/s.
direction_field : str
The name of the field specifying the wind direction in degrees.
0 degrees is defined to be north and increases clockwise like
what is used in standard meteorological notation.
pres_field : str
The name of the field specifying pressure or height. If using
height coordinates, then we recommend setting invert_y_axis
to False.
dsname : str
The name of the datastream to plot. Setting to None will make
ACT attempt to autodetect this.
kwargs : dict
Any additional keyword arguments will be passed into
:func:`act.plotting.TimeSeriesDisplay.plot_barbs_from_u_v`.
Returns
-------
the_ax : matplotlib axis handle
The handle to the axis where the plot was made on.
Examples
--------
.. code-block:: python
sonde_ds = act.io.armfiles.read_netcdf(
act.tests.sample_files.EXAMPLE_TWP_SONDE_WILDCARD)
BarbDisplay = act.plotting.TimeSeriesDisplay(
{'sonde_darwin': sonde_ds}, figsize=(10,5))
BarbDisplay.plot_barbs_from_spd_dir('wspd', 'deg', 'pres',
num_barbs_x=20)
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
# Make temporary field called tempu, tempv
spd = self._ds[dsname][speed_field]
dir = self._ds[dsname][direction_field]
tempu = -np.sin(np.deg2rad(dir)) * spd
tempv = -np.cos(np.deg2rad(dir)) * spd
self._ds[dsname]['temp_u'] = deepcopy(self._ds[dsname][speed_field])
self._ds[dsname]['temp_v'] = deepcopy(self._ds[dsname][speed_field])
self._ds[dsname]['temp_u'].values = tempu
self._ds[dsname]['temp_v'].values = tempv
the_ax = self.plot_barbs_from_u_v('temp_u', 'temp_v', pres_field, dsname, **kwargs)
del self._ds[dsname]['temp_u'], self._ds[dsname]['temp_v']
return the_ax
def plot_barbs_from_u_v(
self,
u_field,
v_field,
pres_field=None,
dsname=None,
subplot_index=(0,),
set_title=None,
day_night_background=False,
invert_y_axis=True,
num_barbs_x=20,
num_barbs_y=20,
use_var_for_y=None,
**kwargs,
):
"""
This function will plot a wind barb timeseries from u and v wind
data. If pres_field is given, a time-height series will be plotted
from 1-D wind data.
Parameters
----------
u_field : str
The name of the field containing the U component of the wind.
v_field : str
The name of the field containing the V component of the wind.
pres_field : str or None
The name of the field containing the pressure or height. Set
to None to not use this.
dsname : str or None
The name of the datastream to plot. Setting to None will make
ACT automatically try to determine this.
subplot_index : 2-tuple
The index of the subplot to make the plot on.
set_title : str or None
The title of the plot.
day_night_background : bool
Set to True to plot a day/night background.
invert_y_axis : bool
Set to True to invert the y axis (i.e. for plotting pressure as
the height coordinate).
num_barbs_x : int
The number of wind barbs to plot in the x axis.
num_barbs_y : int
The number of wind barbs to plot in the y axis.
cmap : matplotlib.colors.LinearSegmentedColormap
A color map to use with wind barbs. If this is set, the wind speed
(the square root of the sum of the squared u and v components) is passed
to plt.barbs as the C parameter and colored with this color map. A
colorbar will also be added. The colorbar limits can be set with 'clim'.
Setting this changes the wind barbs from black to colors.
use_var_for_y : str
Set this to the name of a data variable in the Dataset to use as the
y-axis variable instead of the default dimension. Useful for instances
where data has an index-based dimension instead of a height-based
dimension. If shapes of arrays do not match it will automatically
revert back to the original ydata.
**kwargs : keyword arguments
Additional keyword arguments will be passed into plt.barbs.
Returns
-------
ax : matplotlib axis handle
The axis handle that contains the reference to the
constructed plot.
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
# Get data and dimensions
u = self._ds[dsname][u_field].values
v = self._ds[dsname][v_field].values
dim = list(self._ds[dsname][u_field].dims)
xdata = self._ds[dsname][dim[0]].values
num_x = xdata.shape[-1]
barb_step_x = round(num_x / num_barbs_x)
if barb_step_x == 0:
barb_step_x = 1
if len(dim) > 1 and pres_field is None:
if use_var_for_y is None:
ydata = self._ds[dsname][dim[1]]
else:
ydata = self._ds[dsname][use_var_for_y]
ydata_dim1 = self._ds[dsname][dim[1]]
if np.shape(ydata) != np.shape(ydata_dim1):
ydata = ydata_dim1
if 'units' in ydata.attrs:
units = ydata.attrs['units']
else:
units = ''
ytitle = ''.join(['(', units, ')'])
num_y = ydata.shape[0]
barb_step_y = round(num_y / num_barbs_y)
if barb_step_y == 0:
barb_step_y = 1
xdata, ydata = np.meshgrid(xdata, ydata, indexing='ij')
elif pres_field is not None:
# What we will do here is do a nearest-neighbor interpolation
# for each member of the series. Coordinates are time, pressure
pres = self._ds[dsname][pres_field]
u_interp = NearestNDInterpolator((xdata, pres.values), u, rescale=True)
v_interp = NearestNDInterpolator((xdata, pres.values), v, rescale=True)
barb_step_x = 1
barb_step_y = 1
x_times = pd.date_range(xdata.min(), xdata.max(), periods=num_barbs_x)
if num_barbs_y == 1:
y_levels = pres.mean()
else:
y_levels = np.linspace(np.nanmin(pres), np.nanmax(pres), num_barbs_y)
xdata, ydata = np.meshgrid(x_times, y_levels, indexing='ij')
u = u_interp(xdata, ydata)
v = v_interp(xdata, ydata)
if 'units' in pres.attrs:
units = pres.attrs['units']
else:
units = ''
ytitle = ''.join(['(', units, ')'])
else:
ydata = None
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
if ydata is None:
ydata = np.ones(xdata.shape)
if 'cmap' in kwargs.keys():
map_color = np.sqrt(np.power(u[::barb_step_x], 2) + np.power(v[::barb_step_x], 2))
map_color[np.isnan(map_color)] = 0
ax = self.axes[subplot_index].barbs(
xdata[::barb_step_x],
ydata[::barb_step_x],
u[::barb_step_x],
v[::barb_step_x],
map_color,
**kwargs,
)
plt.colorbar(
ax,
ax=[self.axes[subplot_index]],
label='Wind Speed (' + self._ds[dsname][u_field].attrs['units'] + ')',
)
else:
self.axes[subplot_index].barbs(
xdata[::barb_step_x],
ydata[::barb_step_x],
u[::barb_step_x],
v[::barb_step_x],
**kwargs,
)
self.axes[subplot_index].set_yticks([])
else:
if 'cmap' in kwargs.keys():
map_color = np.sqrt(
np.power(u[::barb_step_x, ::barb_step_y], 2)
+ np.power(v[::barb_step_x, ::barb_step_y], 2)
)
map_color[np.isnan(map_color)] = 0
ax = self.axes[subplot_index].barbs(
xdata[::barb_step_x, ::barb_step_y],
ydata[::barb_step_x, ::barb_step_y],
u[::barb_step_x, ::barb_step_y],
v[::barb_step_x, ::barb_step_y],
map_color,
**kwargs,
)
plt.colorbar(
ax,
ax=[self.axes[subplot_index]],
label='Wind Speed (' + self._ds[dsname][u_field].attrs['units'] + ')',
)
else:
ax = self.axes[subplot_index].barbs(
xdata[::barb_step_x, ::barb_step_y],
ydata[::barb_step_x, ::barb_step_y],
u[::barb_step_x, ::barb_step_y],
v[::barb_step_x, ::barb_step_y],
**kwargs,
)
if day_night_background is True:
self.day_night_background(subplot_index=subplot_index, dsname=dsname)
# Set Title
if set_title is None:
set_title = ' '.join(
[
dsname,
'on',
dt_utils.numpy_to_arm_date(self._ds[dsname].time.values[0]),
]
)
self.axes[subplot_index].set_title(set_title)
# Set YTitle
if 'ytitle' in locals():
self.axes[subplot_index].set_ylabel(ytitle)
# Set X Limit - We want the same time axes for all subplots
time_rng = [xdata.min(), xdata.max()]
self.set_xrng(time_rng, subplot_index)
# Set Y Limit
if hasattr(self, 'yrng'):
# Make sure that the yrng is not just the default
if not np.all(self.yrng[subplot_index] == 0):
self.set_yrng(self.yrng[subplot_index], subplot_index)
else:
if ydata is None:
our_data = xdata
else:
our_data = ydata
if np.isfinite(our_data).any():
if invert_y_axis is False:
yrng = [np.nanmin(our_data), np.nanmax(our_data)]
else:
yrng = [np.nanmax(our_data), np.nanmin(our_data)]
else:
yrng = [0, 1]
self.set_yrng(yrng, subplot_index)
# Set X Format
if len(subplot_index) == 1:
days = self.xrng[subplot_index, 1] - self.xrng[subplot_index, 0]
else:
days = (
self.xrng[subplot_index[0], subplot_index[1], 1]
- self.xrng[subplot_index[0], subplot_index[1], 0]
)
# Put on an xlabel, but only if we are making the bottom-most plot
if subplot_index[0] == self.axes.shape[0] - 1:
self.axes[subplot_index].set_xlabel('Time [UTC]')
myFmt = common.get_date_format(days)
self.axes[subplot_index].xaxis.set_major_formatter(myFmt)
return self.axes[subplot_index]
def plot_time_height_xsection_from_1d_data(
self,
data_field,
pres_field,
dsname=None,
subplot_index=(0,),
set_title=None,
day_night_background=False,
num_time_periods=20,
num_y_levels=20,
invert_y_axis=True,
cbar_label=None,
set_shading='auto',
**kwargs,
):
"""
This will plot a time-height cross section from 1D datasets using
nearest neighbor interpolation on a regular time by height grid.
All that is needed are a data variable and a height variable.
Parameters
----------
data_field : str
The name of the field to plot.
pres_field : str
The name of the height or pressure field to plot.
dsname : str or None
The name of the datastream to plot
subplot_index : 2-tuple
The index of the subplot to create the plot on.
set_title : str or None
The title of the plot.
day_night_background : bool
Set to true to plot the day/night background.
num_time_periods : int
Set to determine how many time periods to use. Setting to None
will use one time period per day.
num_y_levels : int
The number of levels in the y axis to use.
invert_y_axis : bool
Set to true to invert the y-axis (recommended for
pressure coordinates).
cbar_label : str
Option to overwrite default colorbar label.
set_shading : string
Option to set the matplotlib.pcolormesh shading parameter.
Defaults to 'auto'.
**kwargs : keyword arguments
Additional keyword arguments will be passed
into :func:`plt.pcolormesh`
Returns
-------
ax : matplotlib axis handle
The matplotlib axis handle pointing to the plot.
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2'
'or more datasets in the TimeSeriesDisplay'
'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
dim = list(self._ds[dsname][data_field].dims)
if len(dim) > 1:
raise ValueError(
'plot_time_height_xsection_from_1d_data only '
'supports 1-D datasets. For datasets with 2 or '
'more dimensions use plot().'
)
# Get data and dimensions
data = self._ds[dsname][data_field].values
xdata = self._ds[dsname][dim[0]].values
# What we will do here is do a nearest-neighbor interpolation for each
# member of the series. Coordinates are time, pressure
pres = self._ds[dsname][pres_field]
u_interp = NearestNDInterpolator((xdata, pres.values), data, rescale=True)
# Mask points where we have no data
# Count number of unique days
x_times = pd.date_range(xdata.min(), xdata.max(), periods=num_time_periods)
y_levels = np.linspace(np.nanmin(pres), np.nanmax(pres), num_y_levels)
tdata, ydata = np.meshgrid(x_times, y_levels, indexing='ij')
data = u_interp(tdata, ydata)
ytitle = ''.join(['(', pres.attrs['units'], ')'])
units = data_field + ' (' + self._ds[dsname][data_field].attrs['units'] + ')'
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
mesh = self.axes[subplot_index].pcolormesh(
x_times, y_levels, np.transpose(data), shading=set_shading, **kwargs
)
if day_night_background is True:
self.day_night_background(subplot_index=subplot_index, dsname=dsname)
# Set Title
if set_title is None:
set_title = ' '.join(
[
dsname,
'on',
dt_utils.numpy_to_arm_date(self._ds[dsname].time.values[0]),
]
)
self.axes[subplot_index].set_title(set_title)
# Set YTitle
if 'ytitle' in locals():
self.axes[subplot_index].set_ylabel(ytitle)
# Set X Limit - We want the same time axes for all subplots
time_rng = [x_times[0], x_times[-1]]
self.set_xrng(time_rng, subplot_index)
# Set Y Limit
if hasattr(self, 'yrng'):
# Make sure that the yrng is not just the default
if not np.all(self.yrng[subplot_index] == 0):
self.set_yrng(self.yrng[subplot_index], subplot_index)
else:
if ydata is None:
our_data = data.values
else:
our_data = ydata
if np.isfinite(our_data).any():
if invert_y_axis is False:
yrng = [np.nanmin(our_data), np.nanmax(our_data)]
else:
yrng = [np.nanmax(our_data), np.nanmin(our_data)]
else:
yrng = [0, 1]
self.set_yrng(yrng, subplot_index)
# Set X Format
if len(subplot_index) == 1:
days = self.xrng[subplot_index, 1] - self.xrng[subplot_index, 0]
else:
days = (
self.xrng[subplot_index[0], subplot_index[1], 1]
- self.xrng[subplot_index[0], subplot_index[1], 0]
)
# Put on an xlabel, but only if we are making the bottom-most plot
if subplot_index[0] == self.axes.shape[0] - 1:
self.axes[subplot_index].set_xlabel('Time [UTC]')
if ydata is not None:
if cbar_label is None:
self.add_colorbar(mesh, title=units, subplot_index=subplot_index)
else:
self.add_colorbar(mesh, title=cbar_label, subplot_index=subplot_index)
myFmt = common.get_date_format(days)
self.axes[subplot_index].xaxis.set_major_formatter(myFmt)
return self.axes[subplot_index]
def time_height_scatter(
self,
data_field=None,
dsname=None,
cmap='rainbow',
alt_label=None,
alt_field='alt',
cb_label=None,
**kwargs,
):
"""
Create a time series plot of altitude and a data variable, with
color also indicating the data value via a colorbar. The colorbar is
positioned to serve both as the indicator of color intensity
and as the second y-axis.
Parameters
----------
data_field : str
Name of data field in the dataset to plot on second y-axis.
dsname : str or None
The name of the datastream to plot.
cmap : str
Colorbar color map to use.
alt_label : str
Altitude first y-axis label to use. If None, will try to use
long_name and units.
alt_field : str
Name of the altitude field in the dataset to plot on the first y-axis.
cb_label : str
Colorbar label to use. If not set will try to use
long_name and units.
**kwargs : keyword arguments
Any other keyword arguments that will be passed
into TimeSeriesDisplay.plot module when the figure
is made.
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
# Get data and dimensions
data = self._ds[dsname][data_field]
altitude = self._ds[dsname][alt_field]
dim = list(self._ds[dsname][data_field].dims)
xdata = self._ds[dsname][dim[0]]
if alt_label is None:
try:
alt_label = altitude.attrs['long_name'] + ''.join(
[' (', altitude.attrs['units'], ')']
)
except KeyError:
alt_label = alt_field
if cb_label is None:
try:
cb_label = data.attrs['long_name'] + ''.join([' (', data.attrs['units'], ')'])
except KeyError:
cb_label = data_field
colorbar_map = plt.cm.get_cmap(cmap)
self.fig.subplots_adjust(left=0.1, right=0.86, bottom=0.16, top=0.91)
ax1 = self.plot(alt_field, color='black', **kwargs)
ax1.set_ylabel(alt_label)
ax2 = ax1.twinx()
sc = ax2.scatter(xdata.values, data.values, c=data.values, marker='.', cmap=colorbar_map)
cbaxes = self.fig.add_axes(
[
self.fig.subplotpars.right + 0.02,
self.fig.subplotpars.bottom,
0.02,
self.fig.subplotpars.top - self.fig.subplotpars.bottom,
]
)
cbar = plt.colorbar(sc, cax=cbaxes)
ax2.set_ylim(cbar.mappable.get_clim())
cbar.ax.set_ylabel(cb_label)
ax2.set_yticklabels([])
return self.axes[0]
def qc_flag_block_plot(
self,
data_field=None,
dsname=None,
subplot_index=(0,),
time_rng=None,
assessment_color=None,
edgecolor='face',
set_shading='auto',
cb_friendly=False,
**kwargs,
):
"""
Create a time series plot of embedded quality control values
using broken barh plotting.
Parameters
----------
data_field : str
Name of data field in the dataset to plot corresponding quality
control.
dsname : None or str
If there is more than one datastream in the display object the
name of the datastream needs to be specified. If set to None and
there is only one datastream ACT will use the sole datastream
in the object.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
time_rng : tuple or list
List or tuple with (min, max) values to set the x-axis range limits.
assessment_color : dict
Dictionary lookup to override the default assessment-to-color mapping. Make sure
the assessment word is spelled and capitalized to match the flag_assessments values.
edgecolor : str or list
Color name, list of color names or 'face' as defined in matplotlib.axes.Axes.broken_barh
set_shading : string
Option to set the matplotlib.pcolormesh shading parameter.
Defaults to 'auto'.
cb_friendly : boolean
Set to true if you want to use the integrated colorblind friendly
colors for green/red based on the Homeyer colormap
**kwargs : keyword arguments
The keyword arguments for :func:`plt.broken_barh`.
"""
# Color to plot associated with assessment.
color_lookup = {
'Bad': 'red',
'Incorrect': 'red',
'Indeterminate': 'orange',
'Suspect': 'orange',
'Missing': 'darkgray',
'Not Failing': 'green',
'Acceptable': 'green',
}
if cb_friendly:
color_lookup['Bad'] = (0.9285714285714286, 0.7130901016453677, 0.7130901016453677)
color_lookup['Incorrect'] = (0.9285714285714286, 0.7130901016453677, 0.7130901016453677)
color_lookup['Not Failing'] = (0.0, 0.4240129715562796, 0.4240129715562796)
color_lookup['Acceptable'] = (0.0, 0.4240129715562796, 0.4240129715562796)
if assessment_color is not None:
for asses, color in assessment_color.items():
color_lookup[asses] = color
if asses == 'Incorrect':
color_lookup['Bad'] = color
if asses == 'Suspect':
color_lookup['Indeterminate'] = color
# Set up list of test names to use for missing values
missing_val_long_names = [
'Value equal to missing_value*',
'Value set to missing_value*',
'Value is equal to missing_value*',
'Value is set to missing_value*',
]
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
# Set up or get current plot figure
if self.fig is None:
self.fig = plt.figure()
# Set up or get current axes
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
ax = self.axes[subplot_index]
# Set X Limit - We want the same time axes for all subplots
data = self._ds[dsname][data_field]
dim = list(self._ds[dsname][data_field].dims)
xdata = self._ds[dsname][dim[0]]
# Get data and attributes
qc_data_field = self._ds[dsname].qcfilter.check_for_ancillary_qc(
data_field, add_if_missing=False, cleanup=False
)
if qc_data_field is None:
raise ValueError(f'No quality control ancillary variable in Dataset for {data_field}')
flag_masks = self._ds[dsname][qc_data_field].attrs['flag_masks']
flag_meanings = self._ds[dsname][qc_data_field].attrs['flag_meanings']
flag_assessments = self._ds[dsname][qc_data_field].attrs['flag_assessments']
# Get time ranges for green blocks
time_delta = determine_time_delta(xdata.values)
barh_list_green = reduce_time_ranges(xdata.values, time_delta=time_delta, broken_barh=True)
# Set background to gray indicating not available data
ax.set_facecolor('dimgray')
# Check if plotting 2D data vs 1D data. 2D data will be summarized by
# assessment category instead of showing each test.
data_shape = self._ds[dsname][qc_data_field].shape
if len(data_shape) > 1:
cur_assessments = list(set(flag_assessments))
cur_assessments.sort()
cur_assessments.reverse()
qc_data = np.full(data_shape, -1, dtype=np.int16)
plot_colors = []
tick_names = []
index = self._ds[dsname][qc_data_field].values == 0
if index.any():
qc_data[index] = 0
plot_colors.append(color_lookup['Not Failing'])
tick_names.append('Not Failing')
for ii, assess in enumerate(cur_assessments):
if assess not in color_lookup:
color_lookup[assess] = list(mplcolors.CSS4_COLORS.keys())[ii]
ii += 1
assess_data = self._ds[dsname].qcfilter.get_masked_data(
data_field, rm_assessments=assess
)
if assess_data.mask.any():
qc_data[assess_data.mask] = ii
plot_colors.append(color_lookup[assess])
tick_names.append(assess)
# Overwrite missing data. Not sure if we want to do this because VAPs set
# the value to missing but the test is set to Bad. This tries to overcome that
# by looking for correct test description that would only indicate the values
# are missing not that they are set to missing by a test... most likely.
missing_test_nums = []
for ii, flag_meaning in enumerate(flag_meanings):
# Check if the bit set is indicating missing data.
for val in missing_val_long_names:
if re_search(val, flag_meaning):
test_num = parse_bit(flag_masks[ii])[0]
missing_test_nums.append(test_num)
assess_data = self._ds[dsname].qcfilter.get_masked_data(
data_field, rm_tests=missing_test_nums
)
if assess_data.mask.any():
qc_data[assess_data.mask] = -1
plot_colors.append(color_lookup['Missing'])
tick_names.append('Missing')
# Create a masked array to allow not plotting where values are missing
qc_data = np.ma.masked_equal(qc_data, -1)
dims = self._ds[dsname][qc_data_field].dims
xvalues = self._ds[dsname][dims[0]].values
yvalues = self._ds[dsname][dims[1]].values
cMap = mplcolors.ListedColormap(plot_colors)
mesh = ax.pcolormesh(
xvalues,
yvalues,
np.transpose(qc_data),
cmap=cMap,
vmin=0,
shading=set_shading,
)
divider = make_axes_locatable(ax)
# Determine correct placement of words on colorbar
tick_nums = (
np.arange(0, len(tick_names) * 2 + 1) / (len(tick_names) * 2) * np.nanmax(qc_data)
)[1::2]
cax = divider.append_axes('bottom', size='5%', pad=0.3)
cbar = self.fig.colorbar(
mesh,
cax=cax,
orientation='horizontal',
spacing='uniform',
ticks=tick_nums,
shrink=0.5,
)
cbar.ax.set_xticklabels(tick_names)
# Set YTitle
dim_name = list(set(self._ds[dsname][qc_data_field].dims) - {'time'})
try:
ytitle = f"{dim_name[0]} ({self._ds[dsname][dim_name[0]].attrs['units']})"
ax.set_ylabel(ytitle)
except KeyError:
pass
# Add which tests were set as text to the plot
unique_values = []
for ii in np.unique(self._ds[dsname][qc_data_field].values):
unique_values.extend(parse_bit(ii))
if len(unique_values) > 0:
unique_values = list(set(unique_values))
unique_values.sort()
unique_values = [str(ii) for ii in unique_values]
self.fig.text(
0.5,
-0.35,
f"QC Tests Tripped: {', '.join(unique_values)}",
transform=ax.transAxes,
horizontalalignment='center',
verticalalignment='center',
fontweight='bold',
)
else:
test_nums = []
for ii, assess in enumerate(flag_assessments):
if assess not in color_lookup:
color_lookup[assess] = list(mplcolors.CSS4_COLORS.keys())[ii]
# Plot green data first.
ax.broken_barh(
barh_list_green,
(ii, ii + 1),
facecolors=color_lookup['Not Failing'],
edgecolor=edgecolor,
**kwargs,
)
# Get test number from flag_mask bitpacked number
test_nums.append(parse_bit(flag_masks[ii]))
# Get masked array data to use mask for finding if/where test is set
data = self._ds[dsname].qcfilter.get_masked_data(
data_field, rm_tests=test_nums[-1]
)
if np.any(data.mask):
# Get time ranges from time and masked data
barh_list = reduce_time_ranges(
xdata.values[data.mask], time_delta=time_delta, broken_barh=True
)
# Check if the bit set is indicating missing data. If so change
# to different plotting color than what is in flag_assessments.
for val in missing_val_long_names:
if re_search(val, flag_meanings[ii]):
assess = 'Missing'
break
# Lay down blocks of tripped tests using correct color
ax.broken_barh(
barh_list,
(ii, ii + 1),
facecolors=color_lookup[assess],
edgecolor=edgecolor,
**kwargs,
)
# Add test description to plot.
ax.text(xdata.values[0], ii + 0.5, ' ' + flag_meanings[ii], va='center')
# Change y ticks to test number
plt.yticks(
[ii + 0.5 for ii in range(0, len(test_nums))],
labels=['Test ' + str(ii[0]) for ii in test_nums],
)
# Set ylimit to number of tests plotted
ax.set_ylim(0, len(flag_assessments))
# Set X Limit - We want the same time axes for all subplots
if not hasattr(self, 'time_rng'):
if time_rng is not None:
self.time_rng = list(time_rng)
else:
self.time_rng = [xdata.min().values, xdata.max().values]
self.set_xrng(self.time_rng, subplot_index)
# Get X format - We want the same time axes for all subplots
if hasattr(self, 'time_fmt'):
ax.xaxis.set_major_formatter(self.time_fmt)
else:
# Set X Format
if len(subplot_index) == 1:
days = self.xrng[subplot_index, 1] - self.xrng[subplot_index, 0]
else:
days = (
self.xrng[subplot_index[0], subplot_index[1], 1]
- self.xrng[subplot_index[0], subplot_index[1], 0]
)
myFmt = common.get_date_format(days)
ax.xaxis.set_major_formatter(myFmt)
self.time_fmt = myFmt
return self.axes[subplot_index]
def fill_between(
self,
field,
dsname=None,
subplot_index=(0,),
set_title=None,
secondary_y=False,
**kwargs,
):
"""
Makes a fill_between plot, based on matplotlib
Parameters
----------
field : str
The name of the field to plot.
dsname : None or str
If there is more than one datastream in the display object the
name of the datastream needs to be specified. If set to None and
there is only one datastream ACT will use the sole datastream
in the object.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
set_title : str
The title for the plot.
secondary_y : boolean
Option to indicate if the data should be plotted on second y-axis.
**kwargs : keyword arguments
The keyword arguments for :func:`plt.plot` (1D timeseries) or
:func:`plt.pcolormesh` (2D timeseries).
Returns
-------
ax : matplotlib axis handle
The matplotlib axis handle of the plot.
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
# Get data and dimensions
data = self._ds[dsname][field]
dim = list(self._ds[dsname][field].dims)
xdata = self._ds[dsname][dim[0]]
if 'units' in data.attrs:
ytitle = ''.join(['(', data.attrs['units'], ')'])
else:
ytitle = field
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
# Set ax to appropriate axis
if secondary_y is False:
ax = self.axes[subplot_index]
else:
ax = self.axes[subplot_index].twinx()
ax.fill_between(xdata.values, data, **kwargs)
# Set X Format
if len(subplot_index) == 1:
days = self.xrng[subplot_index, 1] - self.xrng[subplot_index, 0]
else:
days = (
self.xrng[subplot_index[0], subplot_index[1], 1]
- self.xrng[subplot_index[0], subplot_index[1], 0]
)
myFmt = common.get_date_format(days)
ax.xaxis.set_major_formatter(myFmt)
# Set X format - We want the same time axes for all subplots
if not hasattr(self, 'time_fmt'):
self.time_fmt = myFmt
# Put on an xlabel, but only if we are making the bottom-most plot
if subplot_index[0] == self.axes.shape[0] - 1:
self.axes[subplot_index].set_xlabel('Time [UTC]')
# Set YTitle
ax.set_ylabel(ytitle)
# Set Title
if set_title is None:
set_title = ' '.join(
[
dsname,
field,
'on',
dt_utils.numpy_to_arm_date(self._ds[dsname].time.values[0]),
]
)
if secondary_y is False:
ax.set_title(set_title)
return self.axes[subplot_index]
<|code_end|>
|
Missing kwargs in plot_stacked_bar_graph
### Description
I just saw this in the code and wanted to report it. I think we need to pass kwargs through to the line below in plot_stacked_bar_graph.
```
my_hist, bins = np.histogram(xdata.values.flatten(), bins=bins, density=density)
```
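A minimal sketch of the requested change (the `hist_kwargs` name is illustrative, not necessarily the final API): forward a separate dict of histogram options to `np.histogram` so they do not collide with the keyword arguments already passed to `plt.bar`.
```
# Sketch only: thread a hist_kwargs dict through to np.histogram so options
# such as 'range' or 'weights' can be set independently of the bar-plot kwargs.
import numpy as np

def histogram_with_kwargs(values, bins=10, density=False, hist_kwargs=None):
    """Compute a histogram, passing extra options straight to np.histogram."""
    hist_kwargs = {} if hist_kwargs is None else hist_kwargs
    counts, edges = np.histogram(values.flatten(), bins=bins,
                                 density=density, **hist_kwargs)
    return counts, edges

# Example: clip the histogram to a fixed range without touching the plot kwargs.
counts, edges = histogram_with_kwargs(np.random.randn(1000), bins=20,
                                      hist_kwargs={'range': (-3, 3)})
```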
|
act/plotting/histogramdisplay.py
<|code_start|>""" Module for Histogram Plotting. """
import matplotlib.pyplot as plt
import numpy as np
import xarray as xr
from ..utils import datetime_utils as dt_utils
from .plot import Display
class HistogramDisplay(Display):
"""
This class is used to make histogram plots. It is inherited from Display
and therefore contains all of Display's attributes and methods.
Examples
--------
To create a HistogramDisplay with 3 rows, simply do:
.. code-block:: python
ds = act.read_netcdf(the_file)
disp = act.plotting.HistogramDisplay(ds, subplot_shape=(3,), figsize=(15, 5))
The HistogramDisplay constructor takes in the same keyword arguments as
plt.subplots. For more information on the plt.subplots keyword arguments,
see the `matplotlib documentation
<https://matplotlib.org/api/_as_gen/matplotlib.pyplot.subplots.html>`_.
If no subplot_shape is provided, then no figure or axis will be created
until add_subplots or plots is called.
"""
def __init__(self, ds, subplot_shape=(1,), ds_name=None, **kwargs):
super().__init__(ds, subplot_shape, ds_name, **kwargs)
def set_xrng(self, xrng, subplot_index=(0,)):
"""
Sets the x range of the plot.
Parameters
----------
xrng : 2 number array
The x limits of the plot.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
"""
if self.axes is None:
raise RuntimeError('set_xrng requires the plot to be displayed.')
if not hasattr(self, 'xrng') and len(self.axes.shape) == 2:
self.xrng = np.zeros((self.axes.shape[0], self.axes.shape[1], 2), dtype='datetime64[D]')
elif not hasattr(self, 'xrng') and len(self.axes.shape) == 1:
self.xrng = np.zeros((self.axes.shape[0], 2), dtype='datetime64[D]')
self.axes[subplot_index].set_xlim(xrng)
self.xrng[subplot_index, :] = np.array(xrng)
def set_yrng(self, yrng, subplot_index=(0,)):
"""
Sets the y range of the plot.
Parameters
----------
yrng : 2 number array
The y limits of the plot.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
"""
if self.axes is None:
raise RuntimeError('set_yrng requires the plot to be displayed.')
if not hasattr(self, 'yrng') and len(self.axes.shape) == 2:
self.yrng = np.zeros((self.axes.shape[0], self.axes.shape[1], 2))
elif not hasattr(self, 'yrng') and len(self.axes.shape) == 1:
self.yrng = np.zeros((self.axes.shape[0], 2))
if yrng[0] == yrng[1]:
yrng[1] = yrng[1] + 1
self.axes[subplot_index].set_ylim(yrng)
self.yrng[subplot_index, :] = yrng
def _get_data(self, dsname, fields):
if isinstance(fields, str):
fields = [fields]
return self._ds[dsname][fields].dropna('time')
def plot_stacked_bar_graph(
self,
field,
dsname=None,
bins=None,
sortby_field=None,
sortby_bins=None,
subplot_index=(0,),
set_title=None,
density=False,
**kwargs,
):
"""
This procedure will plot a stacked bar graph of a histogram.
Parameters
----------
field : str
The name of the field to take the histogram of.
dsname : str or None
The name of the datastream the field is contained in. Set
to None to let ACT automatically determine this.
bins : array-like or None
The histogram bin boundaries to use. Set to None to use
numpy's default boundaries.
sortby_field : str or None
Set this option to a field name in order to sort the histograms
by a given field parameter. For example, one can sort histograms of CO2
concentration by temperature.
sortby_bins : array-like or None
The bins to sort the histograms by.
subplot_index : tuple
The subplot index to place the plot in
set_title : str
The title of the plot.
density : bool
Set to True to plot a p.d.f. instead of a frequency histogram.
Other keyword arguments will be passed into :func:`matplotlib.pyplot.bar`.
Returns
-------
return_dict : dict
A dictionary containing the plot axis handle, bin boundaries, and
generated histogram.
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
+ 'or more datasets in the TimeSeriesDisplay '
+ 'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
if sortby_field is not None:
ds = self._get_data(dsname, [field, sortby_field])
xdata, ydata = ds[field], ds[sortby_field]
else:
xdata = self._get_data(dsname, field)[field]
if 'units' in xdata.attrs:
xtitle = ''.join(['(', xdata.attrs['units'], ')'])
else:
xtitle = field
if bins is not None and sortby_bins is None and sortby_field is not None:
# We will default the y direction to have the same number of bins as x
sortby_bins = np.linspace(ydata.values.min(), ydata.values.max(), len(bins))
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
if sortby_field is not None:
if 'units' in ydata.attrs:
ytitle = ''.join(['(', ydata.attrs['units'], ')'])
else:
ytitle = field
if bins is None:
my_hist, x_bins, y_bins = np.histogram2d(
xdata.values.flatten(), ydata.values.flatten(), density=density
)
else:
my_hist, x_bins, y_bins = np.histogram2d(
xdata.values.flatten(),
ydata.values.flatten(),
density=density,
bins=[bins, sortby_bins],
)
x_inds = (x_bins[:-1] + x_bins[1:]) / 2.0
self.axes[subplot_index].bar(
x_inds,
my_hist[:, 0].flatten(),
label=(str(y_bins[0]) + ' to ' + str(y_bins[1])),
**kwargs,
)
for i in range(1, len(y_bins) - 1):
self.axes[subplot_index].bar(
x_inds,
my_hist[:, i].flatten(),
bottom=my_hist[:, i - 1],
label=(str(y_bins[i]) + ' to ' + str(y_bins[i + 1])),
**kwargs,
)
self.axes[subplot_index].legend()
else:
if bins is None:
bmin = np.nanmin(xdata)
bmax = np.nanmax(xdata)
bins = np.arange(bmin, bmax, (bmax - bmin) / 10.0)
my_hist, bins = np.histogram(xdata.values.flatten(), bins=bins, density=density)
x_inds = (bins[:-1] + bins[1:]) / 2.0
self.axes[subplot_index].bar(x_inds, my_hist)
# Set Title
if set_title is None:
set_title = ' '.join(
[
dsname,
field,
'on',
dt_utils.numpy_to_arm_date(self._ds[dsname].time.values[0]),
]
)
self.axes[subplot_index].set_title(set_title)
self.axes[subplot_index].set_ylabel('count')
self.axes[subplot_index].set_xlabel(xtitle)
return_dict = {}
return_dict['plot_handle'] = self.axes[subplot_index]
if 'x_bins' in locals():
return_dict['x_bins'] = x_bins
return_dict['y_bins'] = y_bins
else:
return_dict['bins'] = bins
return_dict['histogram'] = my_hist
return return_dict
def plot_size_distribution(
self, field, bins, time=None, dsname=None, subplot_index=(0,), set_title=None, **kwargs
):
"""
This procedure plots a stairstep plot of a size distribution. This is
useful for plotting size distributions and waveforms.
Parameters
----------
field : str
The name of the field to plot the spectrum from.
bins : str or array-like
The name of the field that stores the bins for the spectra.
time : none or datetime
If None, spectra to plot will be automatically determined.
Otherwise, specify this field for the time period to plot.
dsname : str
The name of the Dataset to plot. Set to None to have
ACT automatically determine this.
subplot_index : tuple
The subplot index to place the plot in.
set_title : str or None
Use this to set the title.
Additional keyword arguments will be passed into :func:`matplotlib.pyplot.step`
Returns
-------
ax : matplotlib axis handle
The matplotlib axis handle referring to the plot.
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
+ 'or more datasets in the TimeSeriesDisplay '
+ 'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
xdata = self._get_data(dsname, field)[field]
if isinstance(bins, str):
bins = self._ds[dsname][bins]
else:
bins = xr.DataArray(bins)
if 'units' in bins.attrs:
xtitle = ''.join(['(', bins.attrs['units'], ')'])
else:
xtitle = 'Bin #'
if 'units' in xdata.attrs:
ytitle = ''.join(['(', xdata.attrs['units'], ')'])
else:
ytitle = field
if len(xdata.dims) > 1 and time is None:
raise ValueError(
'Input data has more than one dimension, ' + 'you must specify a time to plot!'
)
elif len(xdata.dims) > 1:
xdata = xdata.sel(time=time, method='nearest')
if len(bins.dims) > 1 or len(bins.values) != len(xdata.values):
raise ValueError(
'Bins must be a one dimensional field whose '
+ 'length is equal to the field length!'
)
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
# Set Title
if set_title is None:
set_title = ' '.join(
[
dsname,
field,
'on',
dt_utils.numpy_to_arm_date(self._ds[dsname].time.values[0]),
]
)
self.axes[subplot_index].set_title(set_title)
self.axes[subplot_index].step(bins.values, xdata.values)
self.axes[subplot_index].set_xlabel(xtitle)
self.axes[subplot_index].set_ylabel(ytitle)
return self.axes[subplot_index]
def plot_stairstep_graph(
self,
field,
dsname=None,
bins=None,
sortby_field=None,
sortby_bins=None,
subplot_index=(0,),
set_title=None,
density=False,
**kwargs,
):
"""
This procedure will plot a stairstep plot of a histogram.
Parameters
----------
field : str
The name of the field to take the histogram of.
dsname : str or None
The name of the datastream the field is contained in. Set
to None to let ACT automatically determine this.
bins : array-like or None
The histogram bin boundaries to use. Set to None to use
numpy's default boundaries.
sortby_field : str or None
Set this option to a field name in order to sort the histograms
by a given field parameter. For example, one can sort histograms of CO2
concentration by temperature.
sortby_bins : array-like or None
The bins to sort the histograms by.
subplot_index : tuple
The subplot index to place the plot in.
set_title : str
The title of the plot.
density : bool
Set to True to plot a p.d.f. instead of a frequency histogram.
Other keyword arguments will be passed into :func:`matplotlib.pyplot.step`.
Returns
-------
return_dict : dict
A dictionary containing the plot axis handle, bin boundaries, and
generated histogram.
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
+ 'or more datasets in the TimeSeriesDisplay '
+ 'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
xdata = self._get_data(dsname, field)[field]
if 'units' in xdata.attrs:
xtitle = ''.join(['(', xdata.attrs['units'], ')'])
else:
xtitle = field
if sortby_field is not None:
ydata = self._ds[dsname][sortby_field]
if bins is not None and sortby_bins is None and sortby_field is not None:
# We will default the y direction to have the same number of bins as x
sortby_bins = np.linspace(ydata.values.min(), ydata.values.max(), len(bins))
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
if sortby_field is not None:
if 'units' in ydata.attrs:
ytitle = ''.join(['(', ydata.attrs['units'], ')'])
else:
ytitle = field
if bins is None:
my_hist, x_bins, y_bins = np.histogram2d(
xdata.values.flatten(), ydata.values.flatten(), density=density
)
else:
my_hist, x_bins, y_bins = np.histogram2d(
xdata.values.flatten(),
ydata.values.flatten(),
density=density,
bins=[bins, sortby_bins],
)
x_inds = (x_bins[:-1] + x_bins[1:]) / 2.0
self.axes[subplot_index].step(
x_inds,
my_hist[:, 0].flatten(),
label=(str(y_bins[0]) + ' to ' + str(y_bins[1])),
**kwargs,
)
for i in range(1, len(y_bins) - 1):
self.axes[subplot_index].step(
x_inds,
my_hist[:, i].flatten(),
label=(str(y_bins[i]) + ' to ' + str(y_bins[i + 1])),
**kwargs,
)
self.axes[subplot_index].legend()
else:
my_hist, bins = np.histogram(xdata.values.flatten(), bins=bins, density=density)
x_inds = (bins[:-1] + bins[1:]) / 2.0
self.axes[subplot_index].step(x_inds, my_hist, **kwargs)
# Set Title
if set_title is None:
set_title = ' '.join(
[
dsname,
field,
'on',
dt_utils.numpy_to_arm_date(self._ds[dsname].time.values[0]),
]
)
self.axes[subplot_index].set_title(set_title)
self.axes[subplot_index].set_ylabel('count')
self.axes[subplot_index].set_xlabel(xtitle)
return_dict = {}
return_dict['plot_handle'] = self.axes[subplot_index]
if 'x_bins' in locals():
return_dict['x_bins'] = x_bins
return_dict['y_bins'] = y_bins
else:
return_dict['bins'] = bins
return_dict['histogram'] = my_hist
return return_dict
def plot_heatmap(
self,
x_field,
y_field,
dsname=None,
x_bins=None,
y_bins=None,
subplot_index=(0,),
set_title=None,
density=False,
set_shading='auto',
**kwargs,
):
"""
This procedure will plot a heatmap of a histogram from 2 variables.
Parameters
----------
x_field : str
The name of the field to take the histogram of on the X axis.
y_field : str
The name of the field to take the histogram of on the Y axis.
dsname : str or None
The name of the datastream the field is contained in. Set
to None to let ACT automatically determine this.
x_bins : array-like or None
The histogram bin boundaries to use for the variable on the X axis.
Set to None to use numpy's default boundaries.
y_bins : array-like or None
The histogram bin boundaries to use for the variable on the Y axis.
Set to None to use numpy's default boundaries.
subplot_index : tuple
The subplot index to place the plot in
set_title : str
The title of the plot.
density : bool
Set to True to plot a p.d.f. instead of a frequency histogram.
set_shading : string
Option to set the matplotlib.pcolormesh shading parameter.
Defaults to 'auto'.
Other keyword arguments will be passed into :func:`matplotlib.pyplot.pcolormesh`.
Returns
-------
return_dict : dict
A dictionary containing the plot axis handle, bin boundaries, and
generated histogram.
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
ds = self._get_data(dsname, [x_field, y_field])
xdata, ydata = ds[x_field], ds[y_field]
if 'units' in xdata.attrs:
xtitle = ''.join(['(', xdata.attrs['units'], ')'])
else:
xtitle = x_field
if x_bins is not None and y_bins is None:
# We will default the y direction to have the same number of bins as x
y_bins = np.linspace(ydata.values.min(), ydata.values.max(), len(x_bins))
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
if 'units' in ydata.attrs:
ytitle = ''.join(['(', ydata.attrs['units'], ')'])
else:
ytitle = y_field
if x_bins is None:
my_hist, x_bins, y_bins = np.histogram2d(
xdata.values.flatten(), ydata.values.flatten(), density=density
)
else:
my_hist, x_bins, y_bins = np.histogram2d(
xdata.values.flatten(),
ydata.values.flatten(),
density=density,
bins=[x_bins, y_bins],
)
x_inds = (x_bins[:-1] + x_bins[1:]) / 2.0
y_inds = (y_bins[:-1] + y_bins[1:]) / 2.0
xi, yi = np.meshgrid(x_inds, y_inds, indexing='ij')
mesh = self.axes[subplot_index].pcolormesh(xi, yi, my_hist, shading=set_shading, **kwargs)
# Set Title
if set_title is None:
set_title = ' '.join(
[
dsname,
'on',
dt_utils.numpy_to_arm_date(self._ds[dsname].time.values[0]),
]
)
self.axes[subplot_index].set_title(set_title)
self.axes[subplot_index].set_ylabel(ytitle)
self.axes[subplot_index].set_xlabel(xtitle)
self.add_colorbar(mesh, title='count', subplot_index=subplot_index)
return_dict = {}
return_dict['plot_handle'] = self.axes[subplot_index]
return_dict['x_bins'] = x_bins
return_dict['y_bins'] = y_bins
return_dict['histogram'] = my_hist
return return_dict
<|code_end|>
examples/plotting/plot_hist_kwargs.py
<|code_start|><|code_end|>
|
act/plotting/histogramdisplay.py
<|code_start|>""" Module for Histogram Plotting. """
import matplotlib.pyplot as plt
import numpy as np
import xarray as xr
from ..utils import datetime_utils as dt_utils
from .plot import Display
class HistogramDisplay(Display):
"""
This class is used to make histogram plots. It is inherited from Display
and therefore contains all of Display's attributes and methods.
Examples
--------
To create a HistogramDisplay with 3 rows, simply do:
.. code-block:: python
ds = act.read_netcdf(the_file)
disp = act.plotting.HistogramDisplay(ds, subplot_shape=(3,), figsize=(15, 5))
The HistogramDisplay constructor takes in the same keyword arguments as
plt.subplots. For more information on the plt.subplots keyword arguments,
see the `matplotlib documentation
<https://matplotlib.org/api/_as_gen/matplotlib.pyplot.subplots.html>`_.
If no subplot_shape is provided, then no figure or axis will be created
until add_subplots or plots is called.
"""
def __init__(self, ds, subplot_shape=(1,), ds_name=None, **kwargs):
super().__init__(ds, subplot_shape, ds_name, **kwargs)
def set_xrng(self, xrng, subplot_index=(0,)):
"""
Sets the x range of the plot.
Parameters
----------
xrng : 2 number array
The x limits of the plot.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
"""
if self.axes is None:
raise RuntimeError('set_xrng requires the plot to be displayed.')
if not hasattr(self, 'xrng') and len(self.axes.shape) == 2:
self.xrng = np.zeros((self.axes.shape[0], self.axes.shape[1], 2), dtype='datetime64[D]')
elif not hasattr(self, 'xrng') and len(self.axes.shape) == 1:
self.xrng = np.zeros((self.axes.shape[0], 2), dtype='datetime64[D]')
self.axes[subplot_index].set_xlim(xrng)
self.xrng[subplot_index, :] = np.array(xrng)
def set_yrng(self, yrng, subplot_index=(0,)):
"""
Sets the y range of the plot.
Parameters
----------
yrng : 2 number array
The y limits of the plot.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
"""
if self.axes is None:
raise RuntimeError('set_yrng requires the plot to be displayed.')
if not hasattr(self, 'yrng') and len(self.axes.shape) == 2:
self.yrng = np.zeros((self.axes.shape[0], self.axes.shape[1], 2))
elif not hasattr(self, 'yrng') and len(self.axes.shape) == 1:
self.yrng = np.zeros((self.axes.shape[0], 2))
if yrng[0] == yrng[1]:
yrng[1] = yrng[1] + 1
self.axes[subplot_index].set_ylim(yrng)
self.yrng[subplot_index, :] = yrng
def _get_data(self, dsname, fields):
if isinstance(fields, str):
fields = [fields]
return self._ds[dsname][fields].dropna('time')
def plot_stacked_bar_graph(
self,
field,
dsname=None,
bins=10,
sortby_field=None,
sortby_bins=None,
subplot_index=(0,),
set_title=None,
density=False,
hist_kwargs=dict(),
**kwargs,
):
"""
This procedure will plot a stacked bar graph of a histogram.
Parameters
----------
field : str
The name of the field to take the histogram of.
dsname : str or None
The name of the datastream the field is contained in. Set
to None to let ACT automatically determine this.
bins : array-like or int
The histogram bin boundaries to use, or the number of equal-width bins.
If not specified, numpy's default of 10 bins is used.
sortby_field : str or None
Set this option to a field name in order to sort the histograms
by a given field parameter. For example, one can sort histograms of CO2
concentration by temperature.
sortby_bins : array-like or None
The bins to sort the histograms by.
subplot_index : tuple
The subplot index to place the plot in
set_title : str
The title of the plot.
density : bool
Set to True to plot a p.d.f. instead of a frequency histogram.
hist_kwargs : dict
Additional keyword arguments to pass to numpy histogram.
Other keyword arguments will be passed into :func:`matplotlib.pyplot.bar`.
Returns
-------
return_dict : dict
A dictionary containing the plot axis handle, bin boundaries, and
generated histogram.
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
+ 'or more datasets in the TimeSeriesDisplay '
+ 'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
if sortby_field is not None:
ds = self._get_data(dsname, [field, sortby_field])
xdata, ydata = ds[field], ds[sortby_field]
else:
xdata = self._get_data(dsname, field)[field]
if 'units' in xdata.attrs:
xtitle = ''.join(['(', xdata.attrs['units'], ')'])
else:
xtitle = field
if sortby_bins is None and sortby_field is not None:
# We will default the y direction to have the same number of bins as x
sortby_bins = np.linspace(ydata.values.min(), ydata.values.max(), len(bins))
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
if sortby_field is not None:
if 'units' in ydata.attrs:
ytitle = ''.join(['(', ydata.attrs['units'], ')'])
else:
ytitle = field
if sortby_bins is None:
my_hist, x_bins, y_bins = np.histogram2d(
xdata.values.flatten(), ydata.values.flatten(), density=density,
bins=bins,
**hist_kwargs)
else:
my_hist, x_bins, y_bins = np.histogram2d(
xdata.values.flatten(),
ydata.values.flatten(),
density=density,
bins=[bins, sortby_bins],
**hist_kwargs)
x_inds = (x_bins[:-1] + x_bins[1:]) / 2.0
self.axes[subplot_index].bar(
x_inds,
my_hist[:, 0].flatten(),
label=(str(y_bins[0]) + ' to ' + str(y_bins[1])),
**kwargs,
)
for i in range(1, len(y_bins) - 1):
self.axes[subplot_index].bar(
x_inds,
my_hist[:, i].flatten(),
bottom=my_hist[:, i - 1],
label=(str(y_bins[i]) + ' to ' + str(y_bins[i + 1])),
**kwargs,
)
self.axes[subplot_index].legend()
else:
my_hist, bins = np.histogram(xdata.values.flatten(), bins=bins,
density=density, **hist_kwargs)
x_inds = (bins[:-1] + bins[1:]) / 2.0
self.axes[subplot_index].bar(x_inds, my_hist)
# Set Title
if set_title is None:
set_title = ' '.join(
[
dsname,
field,
'on',
dt_utils.numpy_to_arm_date(self._ds[dsname].time.values[0]),
]
)
self.axes[subplot_index].set_title(set_title)
self.axes[subplot_index].set_ylabel('count')
self.axes[subplot_index].set_xlabel(xtitle)
return_dict = {}
return_dict['plot_handle'] = self.axes[subplot_index]
if 'x_bins' in locals():
return_dict['x_bins'] = x_bins
return_dict['y_bins'] = y_bins
else:
return_dict['bins'] = bins
return_dict['histogram'] = my_hist
return return_dict
def plot_size_distribution(
self, field, bins, time=None, dsname=None, subplot_index=(0,), set_title=None, **kwargs
):
"""
This procedure plots a stairstep plot of a size distribution. This is
useful for plotting size distributions and waveforms.
Parameters
----------
field : str
The name of the field to plot the spectrum from.
bins : str or array-like
The name of the field that stores the bins for the spectra.
time : none or datetime
If None, spectra to plot will be automatically determined.
Otherwise, specify this field for the time period to plot.
dsname : str
The name of the Dataset to plot. Set to None to have
ACT automatically determine this.
subplot_index : tuple
The subplot index to place the plot in.
set_title : str or None
Use this to set the title.
Additional keyword arguments will be passed into :func:`matplotlib.pyplot.step`
Returns
-------
ax : matplotlib axis handle
The matplotlib axis handle referring to the plot.
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
+ 'or more datasets in the TimeSeriesDisplay '
+ 'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
xdata = self._get_data(dsname, field)[field]
if isinstance(bins, str):
bins = self._ds[dsname][bins]
else:
bins = xr.DataArray(bins)
if 'units' in bins.attrs:
xtitle = ''.join(['(', bins.attrs['units'], ')'])
else:
xtitle = 'Bin #'
if 'units' in xdata.attrs:
ytitle = ''.join(['(', xdata.attrs['units'], ')'])
else:
ytitle = field
if len(xdata.dims) > 1 and time is None:
raise ValueError(
'Input data has more than one dimension, ' + 'you must specify a time to plot!'
)
elif len(xdata.dims) > 1:
xdata = xdata.sel(time=time, method='nearest')
if len(bins.dims) > 1 or len(bins.values) != len(xdata.values):
raise ValueError(
'Bins must be a one dimensional field whose '
+ 'length is equal to the field length!'
)
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
# Set Title
if set_title is None:
set_title = ' '.join(
[
dsname,
field,
'on',
dt_utils.numpy_to_arm_date(self._ds[dsname].time.values[0]),
]
)
self.axes[subplot_index].set_title(set_title)
self.axes[subplot_index].step(bins.values, xdata.values, **kwargs)
self.axes[subplot_index].set_xlabel(xtitle)
self.axes[subplot_index].set_ylabel(ytitle)
return self.axes[subplot_index]
def plot_stairstep_graph(
self,
field,
dsname=None,
bins=10,
sortby_field=None,
sortby_bins=None,
subplot_index=(0,),
set_title=None,
density=False,
hist_kwargs=dict(),
**kwargs,
):
"""
This procedure will plot a stairstep plot of a histogram.
Parameters
----------
field : str
The name of the field to take the histogram of.
dsname : str or None
The name of the datastream the field is contained in. Set
to None to let ACT automatically determine this.
bins : array-like or int
The histogram bin boundaries to use, or the number of equal-width bins.
If not specified, numpy's default of 10 bins is used.
sortby_field : str or None
Set this option to a field name in order to sort the histograms
by a given field parameter. For example, one can sort histograms of CO2
concentration by temperature.
sortby_bins : array-like or None
The bins to sort the histograms by.
subplot_index : tuple
The subplot index to place the plot in.
set_title : str
The title of the plot.
density : bool
Set to True to plot a p.d.f. instead of a frequency histogram.
hist_kwargs : dict
Additional keyword arguments to pass to numpy histogram.
Other keyword arguments will be passed into :func:`matplotlib.pyplot.step`.
Returns
-------
return_dict : dict
A dictionary containing the plot axis handle, bin boundaries, and
generated histogram.
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
+ 'or more datasets in the TimeSeriesDisplay '
+ 'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
xdata = self._get_data(dsname, field)[field]
if 'units' in xdata.attrs:
xtitle = ''.join(['(', xdata.attrs['units'], ')'])
else:
xtitle = field
if sortby_field is not None:
ydata = self._ds[dsname][sortby_field]
if sortby_bins is None and sortby_field is not None:
# We will default the y direction to have the same number of bins as x
sortby_bins = np.linspace(ydata.values.min(), ydata.values.max(), len(bins))
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
if sortby_field is not None:
if 'units' in ydata.attrs:
ytitle = ''.join(['(', ydata.attrs['units'], ')'])
else:
ytitle = field
if sortby_bins is None:
my_hist, x_bins, y_bins = np.histogram2d(
xdata.values.flatten(), ydata.values.flatten(), bins=bins,
density=density, **hist_kwargs)
else:
my_hist, x_bins, y_bins = np.histogram2d(
xdata.values.flatten(),
ydata.values.flatten(),
density=density,
bins=[bins, sortby_bins],
**hist_kwargs
)
x_inds = (x_bins[:-1] + x_bins[1:]) / 2.0
self.axes[subplot_index].step(
x_inds,
my_hist[:, 0].flatten(),
label=(str(y_bins[0]) + ' to ' + str(y_bins[1])),
**kwargs,
)
for i in range(1, len(y_bins) - 1):
self.axes[subplot_index].step(
x_inds,
my_hist[:, i].flatten(),
label=(str(y_bins[i]) + ' to ' + str(y_bins[i + 1])),
**kwargs,
)
self.axes[subplot_index].legend()
else:
my_hist, bins = np.histogram(xdata.values.flatten(), bins=bins,
density=density, **hist_kwargs)
x_inds = (bins[:-1] + bins[1:]) / 2.0
self.axes[subplot_index].step(x_inds, my_hist, **kwargs)
# Set Title
if set_title is None:
set_title = ' '.join(
[
dsname,
field,
'on',
dt_utils.numpy_to_arm_date(self._ds[dsname].time.values[0]),
]
)
self.axes[subplot_index].set_title(set_title)
self.axes[subplot_index].set_ylabel('count')
self.axes[subplot_index].set_xlabel(xtitle)
return_dict = {}
return_dict['plot_handle'] = self.axes[subplot_index]
if 'x_bins' in locals():
return_dict['x_bins'] = x_bins
return_dict['y_bins'] = y_bins
else:
return_dict['bins'] = bins
return_dict['histogram'] = my_hist
return return_dict
def plot_heatmap(
self,
x_field,
y_field,
dsname=None,
x_bins=None,
y_bins=None,
subplot_index=(0,),
set_title=None,
density=False,
set_shading='auto',
hist_kwargs=dict(),
**kwargs,
):
"""
This procedure will plot a heatmap of a histogram from 2 variables.
Parameters
----------
x_field : str
The name of the field to take the histogram of on the X axis.
y_field : str
The name of the field to take the histogram of on the Y axis.
dsname : str or None
The name of the datastream the field is contained in. Set
to None to let ACT automatically determine this.
x_bins : array-like or None
The histogram bin boundaries to use for the variable on the X axis.
Set to None to use numpy's default boundaries.
y_bins : array-like or None
The histogram bin boundaries to use for the variable on the Y axis.
Set to None to use numpy's default boundaries.
subplot_index : tuple
The subplot index to place the plot in
set_title : str
The title of the plot.
density : bool
Set to True to plot a p.d.f. instead of a frequency histogram.
set_shading : string
Option to set the matplotlib.pcolormesh shading parameter.
Defaults to 'auto'.
hist_kwargs : dict
Additional keyword arguments to pass to numpy histogram.
Other keyword arguments will be passed into :func:`matplotlib.pyplot.pcolormesh`.
Returns
-------
return_dict : dict
A dictionary containing the plot axis handle, bin boundaries, and
generated histogram.
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
ds = self._get_data(dsname, [x_field, y_field])
xdata, ydata = ds[x_field], ds[y_field]
if 'units' in xdata.attrs:
xtitle = ''.join(['(', xdata.attrs['units'], ')'])
else:
xtitle = x_field
if x_bins is not None and y_bins is None:
# We will default the y direction to have the same number of bins as x
y_bins = np.linspace(ydata.values.min(), ydata.values.max(), len(x_bins))
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
if 'units' in ydata.attrs:
ytitle = ''.join(['(', ydata.attrs['units'], ')'])
else:
ytitle = y_field
if x_bins is None:
my_hist, x_bins, y_bins = np.histogram2d(
xdata.values.flatten(), ydata.values.flatten(), density=density,
**hist_kwargs)
else:
my_hist, x_bins, y_bins = np.histogram2d(
xdata.values.flatten(),
ydata.values.flatten(),
density=density,
bins=[x_bins, y_bins],
**hist_kwargs
)
x_inds = (x_bins[:-1] + x_bins[1:]) / 2.0
y_inds = (y_bins[:-1] + y_bins[1:]) / 2.0
xi, yi = np.meshgrid(x_inds, y_inds, indexing='ij')
mesh = self.axes[subplot_index].pcolormesh(xi, yi, my_hist, shading=set_shading, **kwargs)
# Set Title
if set_title is None:
set_title = ' '.join(
[
dsname,
'on',
dt_utils.numpy_to_arm_date(self._ds[dsname].time.values[0]),
]
)
self.axes[subplot_index].set_title(set_title)
self.axes[subplot_index].set_ylabel(ytitle)
self.axes[subplot_index].set_xlabel(xtitle)
self.add_colorbar(mesh, title='count', subplot_index=subplot_index)
return_dict = {}
return_dict['plot_handle'] = self.axes[subplot_index]
return_dict['x_bins'] = x_bins
return_dict['y_bins'] = y_bins
return_dict['histogram'] = my_hist
return return_dict
<|code_end|>
examples/plotting/plot_hist_kwargs.py
<|code_start|>"""
Plot a histogram of Met data.
----------------------------------------------------
This is a simple example for how to plot a histogram
of meteorological data while using the hist_kwargs parameter.
Author: Zachary Sherman
"""
from matplotlib import pyplot as plt
import numpy as np
import act
files = act.tests.sample_files.EXAMPLE_MET1
met_ds = act.io.armfiles.read_netcdf(files)
# Plot data
hist_kwargs = {'range': (-10, 10)}
histdisplay = act.plotting.HistogramDisplay(met_ds)
histdisplay.plot_stacked_bar_graph('temp_mean', bins=np.arange(-40, 40, 5),
hist_kwargs=hist_kwargs)
plt.show()
<|code_end|>
|
Example Incorrect
### Description
This example says it is for plotting sounding data, but it actually uses MET data. We should update it to use sounding data, as originally intended.
https://arm-doe.github.io/ACT/source/auto_examples/plotting/plot_sonde.html
|
examples/plotting/plot_sonde.py
<|code_start|>"""
Plot a timeseries of sounding data
----------------------------------------------------
This is a simple example for how to plot a timeseries of sounding
data from the ARM SGP site.
Author: Robert Jackson
"""
from matplotlib import pyplot as plt
import act
files = act.tests.sample_files.EXAMPLE_MET_WILDCARD
met_ds = act.io.armfiles.read_netcdf(files)
print(met_ds)
met_temp = met_ds.temp_mean
met_rh = met_ds.rh_mean
met_lcl = (20.0 + met_temp / 5.0) * (100.0 - met_rh) / 1000.0
met_ds['met_lcl'] = met_lcl * 1000.0
met_ds['met_lcl'].attrs['units'] = 'm'
met_ds['met_lcl'].attrs['long_name'] = 'LCL Calculated from SGP MET E13'
# Plot data
display = act.plotting.TimeSeriesDisplay(met_ds)
display.add_subplots((3,), figsize=(15, 10))
display.plot('wspd_vec_mean', subplot_index=(0,))
display.plot('temp_mean', subplot_index=(1,))
display.plot('rh_mean', subplot_index=(2,))
plt.show()
<|code_end|>
|
examples/plotting/plot_sonde.py
<|code_start|>"""
Plot a timeseries of sounding data
----------------------------------------------------
This is a simple example for how to plot a timeseries of sounding
data from the ARM SGP site.
Author: Robert Jackson
"""
from matplotlib import pyplot as plt
import act
files = act.tests.sample_files.EXAMPLE_SONDE1
sonde_ds = act.io.armfiles.read_netcdf(files)
print(sonde_ds)
# Plot data
display = act.plotting.TimeSeriesDisplay(sonde_ds)
display.add_subplots((3,), figsize=(15, 10))
display.plot('wspd', subplot_index=(0,))
display.plot('tdry', subplot_index=(1,))
display.plot('rh', subplot_index=(2,))
plt.show()
<|code_end|>
|
act.utils.decode_present_weather classification tables
* ACT version: 1.4.2
* Python version: 3.9.16
* Operating System: Windows 10
### Description
I was working on decoding the present weather codes for the ATMOS FD70. It appears the FD70 uses both WMO table 4680 and table 4677 in its numeric codes; for its METAR code it uses WMO table 4678.
When the code encounters a value of 88 (shower of snow pellets, adopted from the 4677 table), it raises a KeyError and stops. A possible fix would be adding some of the WMO 4677 entries to fill the numeric gaps where applicable.
### What I Did
```
input:
act.utils.decode_present_weather(ds,variable='present_wx0')
output:
File D:\anaconda3\lib\site-packages\act\utils\inst_utils.py:133 in <listcomp>
wx_type = [weather[d] for d in data.values]
KeyError: 88
```
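A minimal workaround sketch until the table is extended (this is not the ACT fix shown below; the `extra_4677` entry and the `decode_codes` helper are illustrative names only): merge a small supplemental table of WMO 4677 codes and fall back to a placeholder instead of indexing the dictionary directly, so unmapped codes no longer stop the decode.
```python
# Sketch of a defensive decode, assuming table_4680 is a dict like the one in
# act/utils/inst_utils.py. Supplement it with a WMO 4677 code and avoid a
# KeyError on anything that is still unmapped.
extra_4677 = {
    88: 'Shower(s) of snow pellets or small hail, moderate or heavy',  # illustrative entry
}

def decode_codes(values, table_4680, supplement=extra_4677):
    # Combine the tables; supplement entries win for overlapping keys.
    table = {**table_4680, **supplement}
    # dict.get() with a fallback keeps the decode from stopping on unknown codes.
    return [table.get(int(v), 'Unknown code {}'.format(int(v))) for v in values]
```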
|
act/utils/inst_utils.py
<|code_start|>"""
Functions containing utilities for instruments.
"""
def decode_present_weather(ds, variable=None, decoded_name=None):
"""
This function is to decode codes reported from automatic weather stations suchas the PWD22.
This is based on WMO Table 4680.
Parameters
----------
ds : xarray.Dataset
ACT or Xarray dataset from which to convert codes
variable : string
Variable to decode
decoded_name : string
New variable name to store updated labels
Returns
-------
ds : xarray.Dataset
Returns dataset with new decoded data
References
----------
WMO Manual on Code Volume I.1 A-360.
https://library.wmo.int/doc_num.php?explnum_id=10235
"""
# Check to ensure that a variable name is passed
if variable is None:
raise ValueError('You must specify a variable')
if variable not in ds:
raise ValueError('Variable not in the dataset')
# Define the weather hash
weather = {
0: 'No significant weather observed',
1: 'Clouds generally dissolving or becoming less developed during the past hour',
2: 'State of the sky on the whole unchanged during the past hour',
3: 'Clouds generally forming or developing during the past hour',
4: 'Haze or smoke, or dust in suspension in the air, visibility >= 1 km',
5: 'Haze or smoke, or dust in suspension in the air, visibility < 1 km',
10: 'Mist',
11: 'Diamond dust',
12: 'Distant lightning',
18: 'Squalls',
20: 'Fog',
21: 'Precipitation',
22: 'Drizzle (not freezing) or snow grains',
23: 'Rain (not freezing)',
24: 'Snow',
25: 'Freezing drizzle or freezing rain',
26: 'Thunderstorm (with or without precipitation)',
27: 'Blowing or drifting snow or sand',
28: 'Blowing or drifting snow or sand, visibility >= 1 km',
29: 'Blowing or drifting snow or sand, visibility < 1 km',
30: 'Fog',
31: 'Fog or ice fog in patches',
32: 'Fog or ice fog, has become thinner during the past hour',
33: 'Fog or ice fog, no appreciable change during the past hour',
34: 'Fog or ice fog, has begun or become thicker during the past hour',
35: 'Fog, depositing rime',
40: 'Precipitation',
41: 'Precipitation, slight or moderate',
42: 'Precipitation, heavy',
43: 'Liquid precipitation, slight or moderate',
44: 'Liquid precipitation, heavy',
45: 'Solid precipitation, slight or moderate',
46: 'Solid precipitation, heavy',
47: 'Freezing precipitation, slight or moderate',
48: 'Freezing precipitation, heavy',
50: 'Drizzle',
51: 'Drizzle, not freezing, slight',
52: 'Drizzle, not freezing, moderate',
53: 'Drizzle, not freezing, heavy',
54: 'Drizzle, freezing, slight',
55: 'Drizzle, freezing, moderate',
56: 'Drizzle, freezing, heavy',
57: 'Drizzle and rain, slight',
58: 'Drizzle and rain, moderate or heavy',
60: 'Rain',
61: 'Rain, not freezing, slight',
62: 'Rain, not freezing, moderate',
63: 'Rain, not freezing, heavy',
64: 'Rain, freezing, slight',
65: 'Rain, freezing, moderate',
66: 'Rain, freezing, heavy',
67: 'Rain (or drizzle) and snow, slight',
68: 'Rain (or drizzle) and snow, moderate or heavy',
70: 'Snow',
71: 'Snow, light',
72: 'Snow, moderate',
73: 'Snow, heavy',
74: 'Ice pellets, slight',
75: 'Ice pellets, moderate',
76: 'Ice pellets, heavy',
77: 'Snow grains',
78: 'Ice crystals',
80: 'Shower(s) or Intermittent Precipitation',
81: 'Rain shower(s) or intermittent rain, slight',
82: 'Rain shower(s) or intermittent rain, moderate',
83: 'Rain shower(s) or intermittent rain, heavy',
84: 'Rain shower(s) or intermittent rain, violent',
85: 'Snow shower(s) or intermittent snow, slight',
86: 'Snow shower(s) or intermittent snow, moderate',
87: 'Snow shower(s) or intermittent snow, heavy',
89: 'Hail',
90: 'Thunderstorm',
91: 'Thunderstorm, slight or moderate, with no precipitation',
92: 'Thunderstorm, slight or moderate, with rain showers and/or snow showers',
93: 'Thunderstorm, slight or moderate, with hail',
94: 'Thunderstorm, heavy, with no precipitation',
95: 'Thunderstorm, heavy, with rain showers and/or snow showers',
96: 'Thunderstorm, heavy, with hail',
99: 'Tornado',
-9999: 'Missing',
}
# If a decoded name is not passed, make one
if decoded_name is None:
decoded_name = variable + '_decoded'
# Get data and fill nans with -9999
data = ds[variable]
data = data.fillna(-9999)
# Get the weather type for each code
wx_type = [weather[d] for d in data.values]
# Massage the data array to set back in the dataset
data.values = wx_type
if 'long_name' in data.attrs:
data.attrs['long_name'] = data.attrs['long_name'] + ' Decoded'
else:
data.attrs['long_name'] = 'Decoded present weather values'
if 'valid_min' in data.attrs:
del data.attrs['valid_min']
if 'valid_max' in data.attrs:
del data.attrs['valid_max']
ds[decoded_name] = data
return ds
<|code_end|>
|
act/utils/inst_utils.py
<|code_start|>"""
Functions containing utilities for instruments.
"""
def decode_present_weather(ds, variable=None, decoded_name=None):
"""
This function is to decode codes reported from automatic weather stations such as the PWD22.
This is based on WMO Table 4680 as well as a supplement table for WMO table 4677.
Parameters
----------
ds : xarray.Dataset
ACT or Xarray dataset from which to convert codes
variable : string
Variable to decode
decoded_name : string
New variable name to store updated labels
Returns
-------
ds : xarray.Dataset
Returns dataset with new decoded data
References
----------
WMO Manual on Code Volume I.1 A-360.
https://library.wmo.int/doc_num.php?explnum_id=10235
"""
# Check to ensure that a variable name is passed
if variable is None:
raise ValueError('You must specify a variable')
if variable not in ds:
raise ValueError('Variable not in the dataset')
# Define the weather hash for WMO table 4680.
weather_4680 = {
0: 'No significant weather observed',
1: 'Clouds generally dissolving or becoming less developed during the past hour',
2: 'State of the sky on the whole unchanged during the past hour',
3: 'Clouds generally forming or developing during the past hour',
4: 'Haze or smoke, or dust in suspension in the air, visibility >= 1 km',
5: 'Haze or smoke, or dust in suspension in the air, visibility < 1 km',
10: 'Mist',
11: 'Diamond dust',
12: 'Distant lightning',
18: 'Squalls',
20: 'Fog',
21: 'Precipitation',
22: 'Drizzle (not freezing) or snow grains',
23: 'Rain (not freezing)',
24: 'Snow',
25: 'Freezing drizzle or freezing rain',
26: 'Thunderstorm (with or without precipitation)',
27: 'Blowing or drifting snow or sand',
28: 'Blowing or drifting snow or sand, visibility >= 1 km',
29: 'Blowing or drifting snow or sand, visibility < 1 km',
30: 'Fog',
31: 'Fog or ice fog in patches',
32: 'Fog or ice fog, has become thinner during the past hour',
33: 'Fog or ice fog, no appreciable change during the past hour',
34: 'Fog or ice fog, has begun or become thicker during the past hour',
35: 'Fog, depositing rime',
40: 'Precipitation',
41: 'Precipitation, slight or moderate',
42: 'Precipitation, heavy',
43: 'Liquid precipitation, slight or moderate',
44: 'Liquid precipitation, heavy',
45: 'Solid precipitation, slight or moderate',
46: 'Solid precipitation, heavy',
47: 'Freezing precipitation, slight or moderate',
48: 'Freezing precipitation, heavy',
50: 'Drizzle',
51: 'Drizzle, not freezing, slight',
52: 'Drizzle, not freezing, moderate',
53: 'Drizzle, not freezing, heavy',
54: 'Drizzle, freezing, slight',
55: 'Drizzle, freezing, moderate',
56: 'Drizzle, freezing, heavy',
57: 'Drizzle and rain, slight',
58: 'Drizzle and rain, moderate or heavy',
60: 'Rain',
61: 'Rain, not freezing, slight',
62: 'Rain, not freezing, moderate',
63: 'Rain, not freezing, heavy',
64: 'Rain, freezing, slight',
65: 'Rain, freezing, moderate',
66: 'Rain, freezing, heavy',
67: 'Rain (or drizzle) and snow, slight',
68: 'Rain (or drizzle) and snow, moderate or heavy',
70: 'Snow',
71: 'Snow, light',
72: 'Snow, moderate',
73: 'Snow, heavy',
74: 'Ice pellets, slight',
75: 'Ice pellets, moderate',
76: 'Ice pellets, heavy',
77: 'Snow grains',
78: 'Ice crystals',
80: 'Shower(s) or Intermittent Precipitation',
81: 'Rain shower(s) or intermittent rain, slight',
82: 'Rain shower(s) or intermittent rain, moderate',
83: 'Rain shower(s) or intermittent rain, heavy',
84: 'Rain shower(s) or intermittent rain, violent',
85: 'Snow shower(s) or intermittent snow, slight',
86: 'Snow shower(s) or intermittent snow, moderate',
87: 'Snow shower(s) or intermittent snow, heavy',
89: 'Hail',
90: 'Thunderstorm',
91: 'Thunderstorm, slight or moderate, with no precipitation',
92: 'Thunderstorm, slight or moderate, with rain showers and/or snow showers',
93: 'Thunderstorm, slight or moderate, with hail',
94: 'Thunderstorm, heavy, with no precipitation',
95: 'Thunderstorm, heavy, with rain showers and/or snow showers',
96: 'Thunderstorm, heavy, with hail',
99: 'Tornado',
-9999: 'Missing',
}
# Define the weather hash for WMO table 4677.
weather_4677 = {
88: 'Shower(s) of snow pellets or small hail, with or without rain or rain and snow mixed, moderate or heavy',
}
# Join weather tables
weather_combined = dict(weather_4680)
weather_combined.update(weather_4677)
# Sort keys to be in order
weather = dict(sorted(weather_combined.items()))
# If a decoded name is not passed, make one
if decoded_name is None:
decoded_name = variable + '_decoded'
# Get data and fill nans with -9999
data = ds[variable]
data = data.fillna(-9999)
# Get the weather type for each code
wx_type = [weather[d] for d in data.values]
# Massage the data array to set back in the dataset
data.values = wx_type
if 'long_name' in data.attrs:
data.attrs['long_name'] = data.attrs['long_name'] + ' Decoded'
else:
data.attrs['long_name'] = 'Decoded present weather values'
if 'valid_min' in data.attrs:
del data.attrs['valid_min']
if 'valid_max' in data.attrs:
del data.attrs['valid_max']
ds[decoded_name] = data
return ds
<|code_end|>
|
Feedstock failing due to pandas datetime
### Description
CI is failing because the datetime unit is not being specified in the CSV reader.
### What I Did
See the failing PR here:
https://github.com/conda-forge/act-atmos-feedstock/pull/63
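For context, a short sketch of the pandas behavior change behind the failure; the exact error text depends on the pandas version, but recent releases reject a unit-less datetime cast, which is what the reader was doing.
```python
import pandas as pd

df = pd.DataFrame({'date_time': ['2019-11-01 00:00:00', '2019-11-01 00:01:00']})

# The old call cast without a unit, which newer pandas rejects:
#   df.date_time = df.date_time.astype('datetime64')  # TypeError in pandas >= 2.0
# Specifying the unit (or using pd.to_datetime) keeps the reader working:
df['date_time'] = df['date_time'].astype('datetime64[ns]')
# Equivalently:
# df['date_time'] = pd.to_datetime(df['date_time'])
print(df.dtypes)
```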
|
act/io/csvfiles.py
<|code_start|>"""
This module contains I/O operations for loading csv files.
"""
import pathlib
import pandas as pd
from .armfiles import check_arm_standards
def read_csv(filename, sep=',', engine='python', column_names=None, skipfooter=0, ignore_index=True, **kwargs):
"""
Returns an `xarray.Dataset` with stored data and metadata from user-defined
query of CSV files.
Parameters
----------
filenames : str or list
Name of file(s) to read.
sep : str
The separator between columns in the csv file.
column_names : list or None
The list of column names in the csv file.
verbose : bool
If true, will print if a file is not found.
ignore_index : bool
Keyword for pandas concat function. If True, do not use the index
values along the concatenation axis. The resulting axis will be labeled
0, …, n - 1. This is useful if you are concatenating datasets where the
concatenation axis does not have meaningful indexing information. Note
the index values on the other axes are still respected in the join.
Additional keyword arguments will be passed into pandas.read_csv.
Returns
-------
ds : xarray.Dataset
ACT Xarray dataset. Will be None if the file is not found.
Examples
--------
This example will load the example sounding data used for unit testing:
.. code-block:: python
import act
ds = act.io.csvfiles.read(act.tests.sample_files.EXAMPLE_CSV_WILDCARD)
"""
# Convert to string if filename is a pathlib or not a list
if isinstance(filename, (pathlib.PurePath, str)):
filename = [str(filename)]
if isinstance(filename, list) and isinstance(filename[0], pathlib.PurePath):
filename = [str(ii) for ii in filename]
# Read data using pandas read_csv one file at a time and append to
# list. Then concatenate the list into one pandas dataframe.
li = []
for fl in filename:
df = pd.read_csv(
fl, sep=sep, names=column_names, skipfooter=skipfooter, engine=engine, **kwargs
)
li.append(df)
if len(li) == 1:
df = li[0]
else:
df = pd.concat(li, axis=0, ignore_index=ignore_index)
# Set Coordinates if there's a variable date_time
if 'date_time' in df:
df.date_time = df.date_time.astype('datetime64')
df.time = df.date_time
df = df.set_index('time')
# Convert to xarray DataSet
ds = df.to_xarray()
# Set additional variables
# Since we cannot assume a standard naming convention setting
# file_date and file_time to the first time in the file
x_coord = ds.coords.to_index().values[0]
if isinstance(x_coord, str):
x_coord_dt = pd.to_datetime(x_coord)
ds.attrs['_file_dates'] = x_coord_dt.strftime('%Y%m%d')
ds.attrs['_file_times'] = x_coord_dt.strftime('%H%M%S')
# Check for standard ARM datastream name, if none, assume the file is ARM
# standard format.
is_arm_file_flag = check_arm_standards(ds)
if is_arm_file_flag == 0:
ds.attrs['_datastream'] = '.'.join(filename[0].split('/')[-1].split('.')[0:2])
# Add additional attributes, site, standards flag, etc...
ds.attrs['_site'] = str(ds.attrs['_datastream'])[0:3]
ds.attrs['_arm_standards_flag'] = is_arm_file_flag
return ds
<|code_end|>
|
act/io/csvfiles.py
<|code_start|>"""
This module contains I/O operations for loading csv files.
"""
import pathlib
import pandas as pd
from .armfiles import check_arm_standards
def read_csv(filename, sep=',', engine='python', column_names=None, skipfooter=0, ignore_index=True, **kwargs):
"""
Returns an `xarray.Dataset` with stored data and metadata from user-defined
query of CSV files.
Parameters
----------
filenames : str or list
Name of file(s) to read.
sep : str
The separator between columns in the csv file.
column_names : list or None
The list of column names in the csv file.
verbose : bool
If true, will print if a file is not found.
ignore_index : bool
Keyword for pandas concat function. If True, do not use the index
values along the concatenation axis. The resulting axis will be labeled
0, …, n - 1. This is useful if you are concatenating datasets where the
concatenation axis does not have meaningful indexing information. Note
the index values on the other axes are still respected in the join.
Additional keyword arguments will be passed into pandas.read_csv.
Returns
-------
ds : xarray.Dataset
ACT Xarray dataset. Will be None if the file is not found.
Examples
--------
This example will load the example sounding data used for unit testing:
.. code-block:: python
import act
ds = act.io.csvfiles.read(act.tests.sample_files.EXAMPLE_CSV_WILDCARD)
"""
# Convert to string if filename is a pathlib or not a list
if isinstance(filename, (pathlib.PurePath, str)):
filename = [str(filename)]
if isinstance(filename, list) and isinstance(filename[0], pathlib.PurePath):
filename = [str(ii) for ii in filename]
# Read data using pandas read_csv one file at a time and append to
# list. Then concatenate the list into one pandas dataframe.
li = []
for fl in filename:
df = pd.read_csv(
fl, sep=sep, names=column_names, skipfooter=skipfooter, engine=engine, **kwargs
)
li.append(df)
if len(li) == 1:
df = li[0]
else:
df = pd.concat(li, axis=0, ignore_index=ignore_index)
# Set Coordinates if there's a variable date_time
if 'date_time' in df:
df.date_time = df.date_time.astype('datetime64[ns]')
df.time = df.date_time
df = df.set_index('time')
# Convert to xarray DataSet
ds = df.to_xarray()
# Set additional variables
# Since we cannot assume a standard naming convention setting
# file_date and file_time to the first time in the file
x_coord = ds.coords.to_index().values[0]
if isinstance(x_coord, str):
x_coord_dt = pd.to_datetime(x_coord)
ds.attrs['_file_dates'] = x_coord_dt.strftime('%Y%m%d')
ds.attrs['_file_times'] = x_coord_dt.strftime('%H%M%S')
# Check for standard ARM datastream name, if none, assume the file is ARM
# standard format.
is_arm_file_flag = check_arm_standards(ds)
if is_arm_file_flag == 0:
ds.attrs['_datastream'] = '.'.join(filename[0].split('/')[-1].split('.')[0:2])
# Add additional attributes, site, standards flag, etc...
ds.attrs['_site'] = str(ds.attrs['_datastream'])[0:3]
ds.attrs['_arm_standards_flag'] = is_arm_file_flag
return ds
<|code_end|>
|
Ability to see DQR Report link with add_dqr_to_qc (or an additional class to the QC module)
Something along these lines: having the ability to get the DQR report for a datastream of interest via Python would save some time, for example by printing a link or report summary after add_dqr_to_qc runs, since sometimes datastreams have no DQR reports at all.
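One possible shape for this, sketched against the same web service endpoint that add_dqr_to_qc already calls in act/qc/arm.py below. The `list_dqrs` helper name and its printed output are placeholders, not existing ACT API, and building a full link to the DQR report pages would still need the reporting site's URL scheme, which is not shown here.
```python
import requests

def list_dqrs(datastream, variable, assessment='incorrect,suspect'):
    """Sketch: list DQR IDs and subjects for one variable in a datastream."""
    url = (
        'http://www.archive.arm.gov/dqrws/ARMDQR?datastream=' + datastream
        + '&varname=' + variable
        + '&searchmetric=' + assessment
        + '&dqrfields=dqrid,starttime,endtime,metric,subject'
    )
    req = requests.get(url)
    rows = [line.split('|') for line in req.text.splitlines() if line.strip()]
    if not rows:
        print(f'No DQRs found for {datastream} / {variable}')
    for fields in rows:
        # fields[0] is the DQR ID, fields[-1] is the subject text
        print(f'{fields[0]}: {fields[-1]}')
    return [fields[0] for fields in rows]
```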
|
act/io/armfiles.py
<|code_start|>"""
This module contains I/O operations for loading files that were created for the
Atmospheric Radiation Measurement program supported by the Department of Energy
Office of Science.
"""
import copy
import glob
import json
import re
import urllib
import warnings
from pathlib import Path, PosixPath
from netCDF4 import Dataset
from os import PathLike
import tarfile
import tempfile
import numpy as np
import xarray as xr
import act.utils as utils
from act.config import DEFAULT_DATASTREAM_NAME
from act.utils.io_utils import unpack_tar, unpack_gzip, cleanup_files, is_gunzip_file
def read_netcdf(
filenames,
concat_dim=None,
return_None=False,
combine='by_coords',
decode_times=True,
use_cftime=True,
use_base_time=False,
combine_attrs='override',
cleanup_qc=False,
keep_variables=None,
**kwargs,
):
"""
Returns `xarray.Dataset` with stored data and metadata from a user-defined
query of ARM-standard netCDF files from a single datastream. Has some procedures
to ensure time is correctly formatted in returned Dataset.
Parameters
----------
filenames : str, pathlib.PosixPath, list of str, list of pathlib.PosixPath
Name of file(s) to read.
concat_dim : str
Dimension to concatenate files along.
return_None : boolean
Catch IOError exception when file not found and return None.
Default is False.
combine : str
String used by xarray.open_mfdataset() to determine how to combine
data files into one Dataset. See Xarray documentation for options.
decode_times : boolean
Standard Xarray option to decode time values from int/float to python datetime values.
Appears the default is to do this anyway but need this option to allow correct usage
of use_base_time.
use_cftime : boolean
Option to use cftime library to parse the time units string and correctly
establish the time values with a units string containing timezone offset.
This is used because the Pandas units string parser does not correctly recognize
time zone offset. Code will automatically detect cftime object and convert to datetime64
in returned Dataset.
use_base_time : boolean
Option to use ARM time variables base_time and time_offset. Useful when the time variable
is not included (older files) or when the units attribute is incorrectly formatted. Will use
the values of base_time and time_offset as seconds since epoch and create datetime64 values
for time coordinate. If set will change decode_times and use_cftime to False.
combine_attrs : str
String indicating how to combine attrs of the datasets being merged
cleanup_qc : boolean
Call clean.cleanup() method to convert to standardized ancillary quality control
variables. This will not allow any keyword options, so if non-default behavior is
desired will need to call clean.cleanup() method on the dataset after reading the data.
keep_variables : str or list of str
Variable names to read from data file. Works by creating a list of variable names
to exclude from reading and passing into open_mfdataset() via drop_variables keyword.
Still allows use of drop_variables keyword for variables not listed in first file to
read.
**kwargs : keywords
Keywords to pass through to xarray.open_mfdataset().
Returns
-------
ds : xarray.Dataset (or None)
ACT Xarray dataset (or None if no data file(s) found).
Examples
--------
This example will load the example sounding data used for unit testing.
.. code-block :: python
import act
ds = act.io.armfiles.read_netcdf(act.tests.sample_files.EXAMPLE_SONDE_WILDCARD)
print(ds)
"""
ds = None
filenames, cleanup_temp_directory = check_if_tar_gz_file(filenames)
file_dates = []
file_times = []
# If requested to use base_time and time_offset, set keywords to correct attribute values
# to pass into xarray open_mfdataset(). Need to turn off decode_times and use_cftime
# or else will try to convert base_time and time_offset. Depending on values of attributes
# may cause a failure.
if use_base_time:
decode_times = False
use_cftime = False
# Add function keywords to kwargs dictionary for passing into open_mfdataset.
kwargs['combine'] = combine
kwargs['concat_dim'] = concat_dim
kwargs['decode_times'] = decode_times
kwargs['use_cftime'] = use_cftime
if len(filenames) > 1 and not isinstance(filenames, str):
kwargs['combine_attrs'] = combine_attrs
# Check if keep_variables is set. If so determine correct drop_variables
if keep_variables is not None:
drop_variables = None
if 'drop_variables' in kwargs.keys():
drop_variables = kwargs['drop_variables']
kwargs['drop_variables'] = keep_variables_to_drop_variables(
filenames, keep_variables, drop_variables=drop_variables)
# Create an exception tuple to use with try statements. Doing it this way
# so we can add the FileNotFoundError if requested. Can add more error
# handling in the future.
except_tuple = (ValueError,)
if return_None:
except_tuple = except_tuple + (FileNotFoundError, OSError)
try:
# Read data file with Xarray function
ds = xr.open_mfdataset(filenames, **kwargs)
except except_tuple as exception:
# If requested return None for File not found error
if type(exception).__name__ == 'FileNotFoundError':
return None
# If requested return None for File not found error
if type(exception).__name__ == 'OSError' and exception.args[0] == 'no files to open':
return None
# Look at error message and see if could be nested error message. If so
# update combine keyword and try again. This should allow reading files
# without a time variable but with base_time and time_offset variables.
if (
kwargs['combine'] != 'nested'
and type(exception).__name__ == 'ValueError'
and exception.args[0] == 'Could not find any dimension coordinates '
'to use to order the datasets for concatenation'
):
kwargs['combine'] = 'nested'
ds = xr.open_mfdataset(filenames, **kwargs)
else:
# When all else fails raise the original exception
raise exception
# If requested use base_time and time_offset to derive time. Assumes that the units
# of both are in seconds and that the value is number of seconds since epoch.
if use_base_time:
time = (ds['base_time'].values + ds['time_offset'].values) * 1000000.0
time = np.array(time, dtype='datetime64[us]')
# Need to use a new Dataset creation to correctly index time for use with
# .group and .resample methods in Xarray Datasets.
temp_ds = xr.Dataset({'time': (ds['time'].dims, time, ds['time'].attrs)})
ds['time'] = temp_ds['time']
del temp_ds
for att_name in ['units', 'ancillary_variables']:
try:
del ds['time'].attrs[att_name]
except KeyError:
pass
# Xarray has issues reading a CF formatted time units string if it contains
# timezone offset without a [+|-] preceeding timezone offset.
# https://github.com/pydata/xarray/issues/3644
# To ensure the times are read in correctly need to set use_cftime=True.
# This will read in time as cftime object. But Xarray uses numpy datetime64
# natively. This will convert the cftime time values to numpy datetime64. cftime
# does not preserve the time past ms precision. We will use ms precision for
# the conversion.
desired_time_precision = 'datetime64[ms]'
for var_name in ['time', 'time_offset']:
try:
if 'time' in ds.dims and type(ds[var_name].values[0]).__module__.startswith('cftime.'):
# If we just convert time to datetime64 the group, sel, and other Xarray
# methods will not work correctly because time is not indexed. Need to
# use the formation of a Dataset to correctly set the time indexing.
temp_ds = xr.Dataset(
{
var_name: (
ds[var_name].dims,
ds[var_name].values.astype(desired_time_precision),
ds[var_name].attrs,
)
}
)
ds[var_name] = temp_ds[var_name]
del temp_ds
# If time_offset is in file try to convert base_time as well
if var_name == 'time_offset':
ds['base_time'].values = ds['base_time'].values.astype(desired_time_precision)
ds['base_time'] = ds['base_time'].astype(desired_time_precision)
except KeyError:
pass
# Check if "time" variable is not in the netCDF file. If so try to use
# base_time and time_offset to make time variable. Basically a fix for incorrectly
# formatted files. May require using decode_times=False to initially read the data.
if 'time' in ds.dims and not np.issubdtype(ds['time'].dtype, np.datetime64):
try:
ds['time'] = ds['time_offset']
except (KeyError, ValueError):
pass
# Adding support for wildcards
if isinstance(filenames, str):
filenames = glob.glob(filenames)
elif isinstance(filenames, PosixPath):
filenames = [filenames]
# Get file dates and times that were read in to the dataset
filenames.sort()
for f in filenames:
f = Path(f).name
pts = re.match(r'(^[a-zA-Z0-9]+)\.([0-9a-z]{2})\.([\d]{8})\.([\d]{6})\.([a-z]{2,3}$)', f)
# If Not ARM format, read in first time for info
if pts is not None:
pts = pts.groups()
file_dates.append(pts[2])
file_times.append(pts[3])
else:
if ds['time'].size > 1:
dummy = ds['time'].values[0]
else:
dummy = ds['time'].values
file_dates.append(utils.numpy_to_arm_date(dummy))
file_times.append(utils.numpy_to_arm_date(dummy, returnTime=True))
# Add attributes
ds.attrs['_file_dates'] = file_dates
ds.attrs['_file_times'] = file_times
is_arm_file_flag = check_arm_standards(ds)
# Ensure that we have _datastream set whether or not there's
# a datastream attribute already.
if is_arm_file_flag == 0:
ds.attrs['_datastream'] = DEFAULT_DATASTREAM_NAME
else:
ds.attrs['_datastream'] = ds.attrs['datastream']
ds.attrs['_arm_standards_flag'] = is_arm_file_flag
if cleanup_qc:
ds.clean.cleanup()
if cleanup_temp_directory:
cleanup_files(files=filenames)
return ds
def keep_variables_to_drop_variables(
filenames,
keep_variables,
drop_variables=None):
"""
Returns a list of variable names to exclude from reading by passing into
`Xarray.open_dataset` drop_variables keyword. This can greatly help reduce
loading time and disk space use of the Dataset.
When passed a netCDF file name, will open the file using the netCDF4 library to get
list of variable names. There is less overhead reading the variable names using
netCDF4 library than Xarray. If more than one filename is provided or string is
used for shell syntax globbing, will use the first file in the list.
Parameters
----------
filenames : str, pathlib.PosixPath or list of str
Name of file(s) to read.
keep_variables : str or list of str
Variable names desired to keep. Do not need to list associated dimension
names. These will be automatically kept as well.
drop_variables : str or list of str
Variable names to explicitly add to returned list. May be helpful if a variable
exists in a file that is not in the first file in the list.
Returns
-------
drop_vars : list of str
Variable names to exclude from returned Dataset by using drop_variables keyword
when calling Xarray.open_dataset().
Examples
--------
.. code-block :: python
import act
filename = '/data/datastream/hou/houkasacrcfrM1.a1/houkasacrcfrM1.a1.20220404.*.nc'
drop_vars = act.io.armfiles.keep_variables_to_drop_variables(
filename, ['lat','lon','alt','crosspolar_differential_phase'],
drop_variables='variable_name_that_only_exists_in_last_file_of_the_day')
"""
read_variables = []
return_variables = []
if isinstance(keep_variables, str):
keep_variables = [keep_variables]
if isinstance(drop_variables, str):
drop_variables = [drop_variables]
# If filenames is a list subset to first file name.
if isinstance(filenames, (list, tuple)):
filename = filenames[0]
# If filenames is a string, check if it needs to be expanded in shell
# first. Then use first returned file name. Else use the string filename.
elif isinstance(filenames, str):
filename = glob.glob(filenames)
if len(filename) == 0:
return return_variables
else:
filename.sort()
filename = filename[0]
# Use netCDF4 library to extract the variable and dimension names.
rootgrp = Dataset(filename, 'r')
read_variables = list(rootgrp.variables)
dimensions = list(rootgrp.dimensions)
# Loop over the variables to exclude needed coordinate dimension names.
dims_to_keep = []
for var_name in keep_variables:
try:
dims_to_keep.extend(list(rootgrp[var_name].dimensions))
except IndexError:
pass
rootgrp.close()
# Remove names not matching keep_variables, excluding the associated coordinate dimensions
return_variables = set(read_variables) - set(keep_variables) - set(dims_to_keep)
# Add drop_variables to list
if drop_variables is not None:
return_variables = set(return_variables) | set(drop_variables)
return list(return_variables)
def check_arm_standards(ds):
"""
Checks to see if an xarray dataset conforms to ARM standards.
Parameters
----------
ds : Xarray Dataset
The dataset to check.
Returns
-------
flag : int
The flag corresponding to whether or not the file conforms
to ARM standards. Bit packed, so 0 for no, 1 for yes
"""
the_flag = 1 << 0
if 'datastream' not in ds.attrs.keys():
the_flag = 0
# Check if the historical global attribute name is
# used instead of updated name of 'datastream'. If so
# correct the global attributes and flip flag.
if 'zeb_platform' in ds.attrs.keys():
ds.attrs['datastream'] = copy.copy(ds.attrs['zeb_platform'])
del ds.attrs['zeb_platform']
the_flag = 1 << 0
return the_flag
def create_ds_from_arm_dod(proc, set_dims, version='', fill_value=-9999.0, scalar_fill_dim=None, local_file=False):
"""
Queries the ARM DOD api and builds a dataset based on the ARM DOD and
the dimension sizes that are passed in.
Parameters
----------
proc : string
Process to create the dataset off of. This is normally in the
format of inst.level. i.e. vdis.b1 or kazrge.a1. If local file
is true, this points to the path of the .dod file.
set_dims : dict
Dictionary of dims from the DOD and the corresponding sizes.
Time is required. Code will try and pull from DOD, unless set
through this variable
Note: names need to match exactly what is in the dod
i.e. {'drop_diameter': 50, 'time': 1440}
version : string
Version number of the ingest to use. If not set, defaults to
latest version
fill_value : float
Fill value for non-dimension variables. Dimensions cannot have
duplicate values and are incrementally set (0, 1, 2)
scalar_fill_dim : str
Depending on how the dataset is set up, sometimes the scalar values
are dimensioned to the main dimension. i.e. a lat/lon is set to have
a dimension of time. This is a way to set it up similarly.
local_file: bool
If true, the DOD will be loaded from a file whose name is proc.
If false, the DOD will be pulled from PCM.
Returns
-------
ds : xarray.Dataset
ACT Xarray dataset populated with all variables and attributes.
Examples
--------
.. code-block :: python
dims = {'time': 1440, 'drop_diameter': 50}
ds = act.io.armfiles.create_ds_from_arm_dod(
'vdis.b1', dims, version='1.2', scalar_fill_dim='time')
"""
# Set base url to get DOD information
if local_file is False:
base_url = 'https://pcm.arm.gov/pcm/api/dods/'
# Get data from DOD api
with urllib.request.urlopen(base_url + proc) as url:
data = json.loads(url.read().decode())
else:
with open(proc) as file:
data = json.loads(file.read())
# Check version numbers and alert if requested version in not available
keys = list(data['versions'].keys())
if version not in keys:
warnings.warn(
' '.join(
['Version:', version, 'not available or not specified. Using Version:', keys[-1]]
),
UserWarning,
)
version = keys[-1]
# Create empty xarray dataset
ds = xr.Dataset()
# Get the global attributes and add to dataset
atts = {}
for a in data['versions'][version]['atts']:
if a['name'] == 'string':
continue
if a['value'] is None:
a['value'] = ''
atts[a['name']] = a['value']
ds.attrs = atts
# Get variable information and create dataarrays that are
# then added to the dataset
# If not passed in through set_dims, will look to the DOD
# if not set in the DOD, then will raise error
variables = data['versions'][version]['vars']
dod_dims = data['versions'][version]['dims']
for d in dod_dims:
if d['name'] not in list(set_dims.keys()):
if d['length'] > 0:
set_dims[d['name']] = d['length']
else:
raise ValueError(
'Dimension length not set in DOD for '
+ d['name']
+ ', nor passed in through set_dim'
)
for v in variables:
dims = v['dims']
dim_shape = []
# Using provided dimension data, fill array accordingly for easy overwrite
if len(dims) == 0:
if scalar_fill_dim is None:
data_na = fill_value
else:
data_na = np.full(set_dims[scalar_fill_dim], fill_value)
v['dims'] = scalar_fill_dim
else:
for d in dims:
dim_shape.append(set_dims[d])
if len(dim_shape) == 1 and v['name'] == dims[0]:
data_na = np.arange(dim_shape[0])
else:
data_na = np.full(dim_shape, fill_value)
# Get attribute information. Had to do some things to get to print to netcdf
atts = {}
str_flag = False
for a in v['atts']:
if a['name'] == 'string':
str_flag = True
continue
if a['value'] is None:
continue
if str_flag and a['name'] == 'units':
continue
atts[a['name']] = a['value']
da = xr.DataArray(data=data_na, dims=v['dims'], name=v['name'], attrs=atts)
ds[v['name']] = da
return ds
@xr.register_dataset_accessor('write')
class WriteDataset:
"""
Class for cleaning up Dataset before writing to file.
"""
def __init__(self, xarray_ds):
self._ds = xarray_ds
def write_netcdf(
self,
cleanup_global_atts=True,
cleanup_qc_atts=True,
join_char='__',
make_copy=True,
cf_compliant=False,
delete_global_attrs=['qc_standards_version', 'qc_method', 'qc_comment'],
FillValue=-9999,
cf_convention='CF-1.8',
**kwargs,
):
"""
This is a wrapper around Dataset.to_netcdf to clean up the Dataset before
writing to disk. Some things are added to global attributes during ACT reading
process, and QC variables attributes are modified during QC cleanup process.
This will modify before writing to disk to better
match Climate & Forecast standards.
Parameters
----------
cleanup_global_atts : boolean
Option to cleanup global attributes by removing any global attribute
that starts with an underscore.
cleanup_qc_atts : boolean
Option to convert attributes that would be written as string array
to be a single character string. CF 1.7 does not allow string array attributes.
Will use a single space as a delimiter between values and join_char to replace
white space between words.
join_char : str
The character string to use for replacing white spaces between words when converting
a list of strings to single character string attributes.
make_copy : boolean
Make a copy before modifying Dataset to write. For large Datasets this
may add processing time and memory. If modifying the Dataset is OK
try setting to False.
cf_compliant : boolean
Option to output file with additional attributes to make file Climate & Forecast
compliant. May require running the .clean.cleanup() method on the dataset to fix other
issues first. This does the best it can, but the result may not be truly compliant. You
should read the CF documents and try to make the dataset compliant before writing to file.
delete_global_attrs : list
Optional global attributes to be deleted. Defaults to some standard
QC attributes that are not needed. Can add more or set to None to not
remove the attributes.
FillValue : int, float
The value to use as a _FillValue in output file. This is used to fix
issues with how Xarray handles missing_value upon reading. It's confusing
so not a perfect fix. Set to None to leave Xarray to do what it wants.
Set to a value to be the value used as _FillValue in the file and data
array. This should then remove missing_value attribute from the file as well.
cf_convention : str
The Climate and Forecast convention string to add to Conventions attribute.
**kwargs : keywords
Keywords to pass through to Dataset.to_netcdf()
Examples
--------
.. code-block :: python
ds.write.write_netcdf(path='output.nc')
"""
if make_copy:
write_ds = copy.deepcopy(self._ds)
else:
write_ds = self._ds
encoding = {}
if cleanup_global_atts:
for attr in list(write_ds.attrs):
if attr.startswith('_'):
del write_ds.attrs[attr]
if cleanup_qc_atts:
check_atts = ['flag_meanings', 'flag_assessments']
for var_name in list(write_ds.data_vars):
if 'standard_name' not in write_ds[var_name].attrs.keys():
continue
for attr_name in check_atts:
try:
att_values = write_ds[var_name].attrs[attr_name]
if isinstance(att_values, (list, tuple)):
att_values = [att_value.replace(' ', join_char) for att_value in att_values]
write_ds[var_name].attrs[attr_name] = ' '.join(att_values)
except KeyError:
pass
# Tell .to_netcdf() to not add a _FillValue attribute for
# quality control variables.
if FillValue is not None:
encoding[var_name] = {'_FillValue': None}
# Clean up _FillValue vs missing_value mess by creating an
# encoding dictionary with each variable's _FillValue set to
# requested fill value. May need to improve upon this for data type
# and other issues in the future.
if FillValue is not None:
skip_variables = ['base_time', 'time_offset', 'qc_time'] + list(encoding.keys())
for var_name in list(write_ds.data_vars):
if var_name not in skip_variables:
encoding[var_name] = {'_FillValue': FillValue}
if delete_global_attrs is not None:
for attr in delete_global_attrs:
try:
del write_ds.attrs[attr]
except KeyError:
pass
# If requested update global attributes and variables attributes for required
# CF attributes.
if cf_compliant:
# Get variable names and standard name for each variable
var_names = list(write_ds.keys())
standard_names = []
for var_name in var_names:
try:
standard_names.append(write_ds[var_name].attrs['standard_name'])
except KeyError:
standard_names.append(None)
# Check if time variable has axis and standard_name attribute
coord_name = 'time'
try:
write_ds[coord_name].attrs['axis']
except KeyError:
try:
write_ds[coord_name].attrs['axis'] = 'T'
except KeyError:
pass
try:
write_ds[coord_name].attrs['standard_name']
except KeyError:
try:
write_ds[coord_name].attrs['standard_name'] = 'time'
except KeyError:
pass
# Try to determine type of dataset by coordinate dimension named time
# and other factors
try:
write_ds.attrs['FeatureType']
except KeyError:
dim_names = list(write_ds.dims)
FeatureType = None
if dim_names == ['time']:
FeatureType = 'timeSeries'
elif len(dim_names) == 2 and 'time' in dim_names and 'bound' in dim_names:
FeatureType = 'timeSeries'
elif len(dim_names) >= 2 and 'time' in dim_names:
for var_name in var_names:
dims = list(write_ds[var_name].dims)
if len(dims) == 2 and 'time' in dims:
prof_dim = list(set(dims) - {'time'})[0]
if write_ds[prof_dim].values.size > 2:
FeatureType = 'timeSeriesProfile'
break
if FeatureType is not None:
write_ds.attrs['FeatureType'] = FeatureType
# Add axis and positive attributes to variables with standard_name
# equal to 'altitude'
alt_variables = [
var_names[ii] for ii, sn in enumerate(standard_names) if sn == 'altitude'
]
for var_name in alt_variables:
try:
write_ds[var_name].attrs['axis']
except KeyError:
write_ds[var_name].attrs['axis'] = 'Z'
try:
write_ds[var_name].attrs['positive']
except KeyError:
write_ds[var_name].attrs['positive'] = 'up'
# Check if the Conventions global attribute lists the CF convention
try:
Conventions = write_ds.attrs['Conventions']
Conventions = Conventions.split()
cf_listed = False
for ii in Conventions:
if ii.startswith('CF-'):
cf_listed = True
break
if not cf_listed:
Conventions.append(cf_convention)
write_ds.attrs['Conventions'] = ' '.join(Conventions)
except KeyError:
write_ds.attrs['Conventions'] = str(cf_convention)
# Reorder global attributes to ensure history is last
try:
global_attrs = write_ds.attrs
history = copy.copy(global_attrs['history'])
del global_attrs['history']
global_attrs['history'] = history
except KeyError:
pass
write_ds.to_netcdf(encoding=encoding, **kwargs)
def check_if_tar_gz_file(filenames):
"""
Unpacks gunzip and/or TAR file contents and returns Xarray Dataset
...
Parameters
----------
filenames : str, pathlib.Path
Filenames to check if gunzip and/or tar files.
Returns
-------
filenames : Paths to extracted files from gunzip or TAR files
"""
cleanup = False
if isinstance(filenames, (str, PathLike)):
try:
if is_gunzip_file(filenames) or tarfile.is_tarfile(str(filenames)):
tmpdirname = tempfile.mkdtemp()
cleanup = True
if is_gunzip_file(filenames):
filenames = unpack_gzip(filenames, write_directory=tmpdirname)
if tarfile.is_tarfile(str(filenames)):
filenames = unpack_tar(filenames, write_directory=tmpdirname, randomize=False)
except Exception:
pass
return filenames, cleanup
def read_mmcr(filenames):
"""
Reads in ARM MMCR files and splits up the variables into specific
mode variables based on what's in the files. MMCR files have the modes
interleaved and are not readable using xarray so some modifications are
needed ahead of time.
Parameters
----------
filenames : str, pathlib.PosixPath or list of str
Name of file(s) to read.
Returns
-------
ds : xarray.Dataset (or None)
ACT Xarray dataset (or None if no data file(s) found).
"""
# Sort the files to make sure they concatenate right
filenames.sort()
# Run through each file and read it in using netCDF4, then
# read it in with xarray
multi_ds = []
for f in filenames:
nc = Dataset(f, "a")
# Change heights name to range to read appropriately to xarray
if 'heights' in nc.dimensions:
nc.renameDimension('heights', 'range')
if nc is not None:
ds = xr.open_dataset(xr.backends.NetCDF4DataStore(nc))
multi_ds.append(ds)
# Concatenate datasets together
if len(multi_ds) > 1:
ds = xr.concat(multi_ds, dim='time')
else:
ds = multi_ds[0]
# Get modes and ranges with time/height modes
modes = ds['mode'].values
mode_vars = []
for v in ds:
if 'range' in ds[v].dims and 'time' in ds[v].dims and len(ds[v].dims) == 2:
mode_vars.append(v)
# For each mode, run extract data variables if available
# saves as individual variables in the file.
for m in modes:
if len(ds['ModeDescription'].shape) > 1:
mode_desc = ds['ModeDescription'].values[0, m]
if np.isnan(ds['heights'].values[0, m, :]).all():
continue
range_data = ds['heights'].values[0, m, :]
else:
mode_desc = ds['ModeDescription'].values[m]
if np.isnan(ds['heights'].values[m, :]).all():
continue
range_data = ds['heights'].values[m, :]
mode_desc = str(mode_desc).split('_')[-1][0:-1]
mode_desc = str(mode_desc).split('\'')[0]
idx = np.where(ds['ModeNum'].values == m)[0]
idy = np.where(~np.isnan(range_data))[0]
for v in mode_vars:
new_var_name = v + '_' + mode_desc
time_name = 'time_' + mode_desc
range_name = 'range_' + mode_desc
data = ds[v].values[idx, :]
data = data[:, idy]
attrs = ds[v].attrs
da = xr.DataArray(
data=data,
coords={time_name: ds['time'].values[idx], range_name: range_data[idy]},
dims=[time_name, range_name],
attrs=attrs
)
ds[new_var_name] = da
return ds
<|code_end|>
act/qc/arm.py
<|code_start|>"""
Functions specifically for working with QC/DQRs from
the Atmospheric Radiation Measurement Program (ARM).
"""
import datetime as dt
import numpy as np
import requests
from act.config import DEFAULT_DATASTREAM_NAME
def add_dqr_to_qc(
ds,
variable=None,
assessment='incorrect,suspect',
exclude=None,
include=None,
normalize_assessment=True,
cleanup_qc=True,
):
"""
Function to query the ARM DQR web service for reports and
add as a new quality control test to ancillary quality control
variable. If no ancillary quality control variable exists, a new
one will be created and linked to the data variable through the
ancillary_variables attribute.
See online documentation from ARM Data
Quality Office on the use of the DQR web service.
https://code.arm.gov/docs/dqrws-examples/wikis/home
Information about the DQR web service is available at
https://adc.arm.gov/dqrws/
Parameters
----------
ds : xarray.Dataset
Xarray dataset
variable : string, or list of str, or None
Variables to check DQR web service. If set to None will
attempt to update all variables.
assessment : string
assessment type to get DQRs. Current options include
'missing', 'suspect', 'incorrect' or any combination separated
by a comma.
exclude : list of strings
DQR IDs to exclude from adding into QC
include : list of strings
List of DQR IDs to include in flagging of data. Any other DQR IDs
will be ignored.
normalize_assessment : boolean
The DQR assessment term is different than the embedded QC
term. Embedded QC uses "Bad" and "Indeterminate" while
DQRs use "Incorrect" and "Suspect". Setting this will ensure
the same terms are used for both.
cleanup_qc : boolean
Call clean.cleanup() method to convert to standardized ancillary
quality control variables. Has a little bit of overhead so
if the Dataset has already been cleaned up, no need to run.
Returns
-------
ds : xarray.Dataset
Xarray dataset containing new quality control variables
Examples
--------
.. code-block:: python
from act.qc.arm import add_dqr_to_qc
ds = add_dqr_to_qc(ds, variable=['temp_mean', 'atmos_pressure'])
"""
# DQR Webservice goes off datastreams, pull from the dataset
if 'datastream' in ds.attrs:
datastream = ds.attrs['datastream']
elif '_datastream' in ds.attrs:
datastream = ds.attrs['_datastream']
else:
raise ValueError('Dataset does not have datastream attribute')
if datastream == DEFAULT_DATASTREAM_NAME:
raise ValueError("'datastream' name required for DQR service set to default value "
f"{datastream}. Unable to perform DQR service query.")
# Clean up QC to conform to CF conventions
if cleanup_qc:
ds.clean.cleanup()
# In order to properly flag data, get all variables if None. Exclude QC variables.
if variable is None:
variable = list(set(ds.data_vars) - set(ds.clean.matched_qc_variables))
# Check to ensure variable is list
if not isinstance(variable, (list, tuple)):
variable = [variable]
# Loop through each variable and call web service for that variable
for var_name in variable:
# Create URL
url = 'http://www.archive.arm.gov/dqrws/ARMDQR?datastream='
url += datastream
url += '&varname=' + var_name
url += ''.join(
[
'&searchmetric=',
assessment,
'&dqrfields=dqrid,starttime,endtime,metric,subject',
]
)
# Call web service
req = requests.get(url)
# Check status values and raise error if not successful
status = req.status_code
if status == 400:
raise ValueError('Check parameters')
if status == 500:
raise ValueError('DQR Webservice Temporarily Down')
# Get data and run through each dqr
dqrs = req.text.splitlines()
time = ds['time'].values
dqr_results = {}
for line in dqrs:
line = line.split('|')
dqr_no = line[0]
# Exclude DQRs if in list
if exclude is not None and dqr_no in exclude:
continue
# Only include if in include list
if include is not None and dqr_no not in include:
continue
starttime = np.datetime64(dt.datetime.utcfromtimestamp(int(line[1])))
endtime = np.datetime64(dt.datetime.utcfromtimestamp(int(line[2])))
ind = np.where((time >= starttime) & (time <= endtime))
if ind[0].size == 0:
continue
if dqr_no in dqr_results.keys():
dqr_results[dqr_no]['index'] = np.append(dqr_results[dqr_no]['index'], ind)
else:
dqr_results[dqr_no] = {
'index': ind,
'test_assessment': line[3],
'test_meaning': ': '.join([dqr_no, line[-1]]),
}
for key, value in dqr_results.items():
try:
ds.qcfilter.add_test(
var_name,
index=value['index'],
test_meaning=value['test_meaning'],
test_assessment=value['test_assessment'],
)
except IndexError:
print(f"Skipping '{var_name}' DQR application because of IndexError")
if normalize_assessment:
ds.clean.normalize_assessment(variables=var_name)
return ds
<|code_end|>
act/qc/qcfilter.py
<|code_start|>"""
Functions and methods for creating ancillary quality control variables
and filters (masks) which can be used with various corrections
routines in ACT.
"""
import dask
import numpy as np
import xarray as xr
from act.qc import comparison_tests, qctests, bsrn_tests
@xr.register_dataset_accessor('qcfilter')
class QCFilter(qctests.QCTests, comparison_tests.QCTests, bsrn_tests.QCTests):
"""
A class for building quality control variables containing arrays for
filtering data based on a set of test condition typically based on the
values in the data fields. These filters can be used in various
algorithms and calculations within ACT.
"""
def __init__(self, ds):
"""initialize"""
self._ds = ds
def check_for_ancillary_qc(
self,
var_name,
add_if_missing=True,
cleanup=False,
flag_type=False
):
"""
Method to check if a quality control variable exist in the dataset
and return the quality control variable name.
Will call create_qc_variable() to make the variable if it does not exist
and update_ancillary_variable() to ensure linkage between the data and
quality control variable. Can also be used just to get the
corresponding quality control variable name, adding it if
it is missing.
Parameters
----------
var_name : str
Data variable name.
add_if_missing : boolean
Add quality control variable if missing from the dataset. Will raise
an exception if the var_name does not exist in the Dataset. Set to False
to not raise exception.
cleanup : boolean
Option to run qc.clean.cleanup() method on the dataset
to ensure the dataset was updated from ARM QC to the
correct standardized QC.
flag_type : boolean
Indicating the QC variable uses flag_values instead of
flag_masks.
Returns
-------
qc_var_name : str or None
Name of existing or new quality control variable. Returns
None if no existing quality control variable is found and
add_if_missing is set to False.
Examples
--------
.. code-block:: python
from act.tests import EXAMPLE_METE40
from act.io.armfiles import read_netcdf
ds = read_netcdf(EXAMPLE_METE40, cleanup_qc=True)
qc_var_name = ds.qcfilter.check_for_ancillary_qc('atmos_pressure')
print(f'qc_var_name: {qc_var_name}')
qc_var_name = ds.qcfilter.check_for_ancillary_qc('the_greatest_variable_ever',
add_if_missing=False)
print(f'qc_var_name: {qc_var_name}')
"""
qc_var_name = None
try:
ancillary_variables = self._ds[var_name].attrs['ancillary_variables']
if isinstance(ancillary_variables, str):
ancillary_variables = ancillary_variables.split()
for var in ancillary_variables:
for attr, value in self._ds[var].attrs.items():
if attr == 'standard_name' and 'quality_flag' in value:
qc_var_name = var
if add_if_missing and qc_var_name is None:
qc_var_name = self._ds.qcfilter.create_qc_variable(var_name, flag_type=flag_type)
except KeyError:
# Since no ancillary_variables exist look for ARM style of QC
# variable name. If it exists use it else create new
# QC variable.
if add_if_missing:
try:
self._ds['qc_' + var_name]
qc_var_name = 'qc_' + var_name
except KeyError:
qc_var_name = self._ds.qcfilter.create_qc_variable(
var_name, flag_type=flag_type
)
# Make sure data variable has a variable attribute linking
# data variable to QC variable.
if add_if_missing:
self._ds.qcfilter.update_ancillary_variable(var_name, qc_var_name)
# Clean up quality control variables to the required standard in the
# xarray dataset.
if cleanup:
self._ds.clean.cleanup(handle_missing_value=True, link_qc_variables=False)
return qc_var_name
def create_qc_variable(
self, var_name,
flag_type=False,
flag_values_set_value=0,
qc_var_name=None
):
"""
Method to create a quality control variable in the dataset.
Will try not to destroy the qc variable by appending numbers
to the variable name if needed.
Parameters
----------
var_name : str
Data variable name.
flag_type : boolean
If an integer flag type should be created instead of
bitpacked mask type. Will create flag_values instead of
flag_masks.
flag_values_set_value : int
Initial flag value to use when initializing array.
qc_var_name : str
Optional new quality control variable name. If not set
will create one using \\"qc\\_\\" prepended to the data
variable name. If the name given or created is taken
will append a number that does not have a conflict.
Returns
-------
qc_var_name : str
Name of new quality control variable created.
Examples
--------
.. code-block:: python
from act.tests import EXAMPLE_AOSMET
from act.io.armfiles import read_netcdf
ds = read_netcdf(EXAMPLE_AOSMET)
qc_var_name = ds.qcfilter.create_qc_variable('temperature_ambient')
print(qc_var_name)
print(ds[qc_var_name])
"""
# Make QC variable long name. The variable long_name attribute
# may not exist so catch that error and set to default.
try:
qc_variable_long_name = (
'Quality check results on field: ' + self._ds[var_name].attrs['long_name']
)
except KeyError:
qc_variable_long_name = 'Quality check results for ' + var_name
# Make a new quality control variable name. Check if exists in the
# dataset. If so loop through creation of new name until one is
# found that will not replace existing variable.
if qc_var_name is None:
qc_var_name = 'qc_' + var_name
variable_names = list(self._ds.data_vars)
if qc_var_name in variable_names:
for ii in range(1, 100):
temp_qc_var_name = '_'.join([qc_var_name, str(ii)])
if temp_qc_var_name not in variable_names:
qc_var_name = temp_qc_var_name
break
# Create the QC variable filled with 0 values matching the
# shape of data variable.
try:
qc_data = dask.array.from_array(
np.zeros_like(self._ds[var_name].values, dtype=np.int32),
chunks=self._ds[var_name].data.chunksize,
)
except AttributeError:
qc_data = np.zeros_like(self._ds[var_name].values, dtype=np.int32)
# Updating to use coords instead of dim, which caused a loss of
# attributes as noted in Issue 347
self._ds[qc_var_name] = xr.DataArray(
data=qc_data,
coords=self._ds[var_name].coords,
attrs={'long_name': qc_variable_long_name, 'units': '1'},
)
# Update if using flag_values and don't want 0 to be default value.
if flag_type and flag_values_set_value != 0:
self._ds[qc_var_name].values = self._ds[qc_var_name].values + int(
flag_values_set_value
)
# Add required variable attributes.
if flag_type:
self._ds[qc_var_name].attrs['flag_values'] = []
else:
self._ds[qc_var_name].attrs['flag_masks'] = []
self._ds[qc_var_name].attrs['flag_meanings'] = []
self._ds[qc_var_name].attrs['flag_assessments'] = []
self._ds[qc_var_name].attrs['standard_name'] = 'quality_flag'
self.update_ancillary_variable(var_name, qc_var_name=qc_var_name)
return qc_var_name
def update_ancillary_variable(self, var_name, qc_var_name=None):
"""
Method to check if ancillary_variables variable attribute
is set with quality control variable name.
Parameters
----------
var_name : str
Data variable name.
qc_var_name : str
quality control variable name. If not given will attempt
to get the name from data variable ancillary_variables
attribute.
Examples
--------
.. code-block:: python
from act.tests import EXAMPLE_AOSMET
from act.io.armfiles import read_netcdf
ds = read_netcdf(EXAMPLE_AOSMET)
var_name = 'temperature_ambient'
qc_var_name = ds.qcfilter.create_qc_variable(var_name)
del ds[var_name].attrs['ancillary_variables']
ds.qcfilter.update_ancillary_variable(var_name, qc_var_name)
print(ds[var_name].attrs['ancillary_variables'])
"""
if qc_var_name is None:
qc_var_name = self._ds.qcfilter.check_for_ancillary_qc(var_name, add_if_missing=False)
if qc_var_name is None:
return
try:
ancillary_variables = self._ds[var_name].attrs['ancillary_variables']
if qc_var_name not in ancillary_variables:
ancillary_variables = ' '.join([ancillary_variables, qc_var_name])
except KeyError:
ancillary_variables = qc_var_name
self._ds[var_name].attrs['ancillary_variables'] = ancillary_variables
def add_test(
self,
var_name,
index=None,
test_number=None,
test_meaning=None,
test_assessment='Bad',
flag_value=False,
recycle=False,
):
"""
Method to add a new test/filter to a quality control variable.
Parameters
----------
var_name : str
data variable name
index : int, bool, list of int or bool, numpy array, tuple of numpy arrays, None
Indexes into quality control array to set the test bit.
If not set or set to None will not set the test on any
element of the quality control variable but will still
add the test to the flag_masks, flag_meanings and
flag_assessments attributes.
test_number : int
Test number to use. If keyword is not set will use first
available test bit/test number.
test_meaning : str
String describing the test. Will be added to flag_meanings
variable attribute.
test_assessment : str
String describing the test assessment. If not set will use
"Bad" as the string to append to flag_assessments. Will
update to be capitalized.
flag_value : boolean
Switch to use flag_values integer quality control.
        recycle : boolean
            Option to use a test number lower than the next highest if one is
            available. For example, if tests 1, 2, 4, 5 are set and recycle is
            True, the next test chosen will be 3; otherwise it will be 6.
Returns
-------
test_dict : dict
A dictionary containing information added to the QC
variable.
Examples
--------
.. code-block:: python
result = ds.qcfilter.add_test(var_name, test_meaning='Birds!')
"""
test_dict = {}
if test_meaning is None:
raise ValueError(
'You need to provide a value for test_meaning '
'keyword when calling the add_test method'
)
# This ensures the indexing will work even if given float values.
# Preserves tuples from np.where() or boolean arrays for standard
# python indexing.
if index is not None and not isinstance(index, (np.ndarray, tuple)):
index = np.array(index)
if index.dtype.kind not in np.typecodes['AllInteger']:
index = index.astype(int)
# Ensure assessment is capitalized to be consistent
test_assessment = test_assessment.capitalize()
qc_var_name = self._ds.qcfilter.check_for_ancillary_qc(var_name, flag_type=flag_value)
if test_number is None:
test_number = self._ds.qcfilter.available_bit(qc_var_name, recycle=recycle)
self._ds.qcfilter.set_test(var_name, index, test_number, flag_value)
if flag_value:
try:
self._ds[qc_var_name].attrs['flag_values'].append(test_number)
except KeyError:
self._ds[qc_var_name].attrs['flag_values'] = [test_number]
else:
# Determine if flag_masks test number is too large for current data type.
# If so up convert data type.
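            # For example, flag_masks stored as uint8 can hold tests 1-8 (largest
            # mask 128); adding test 9 requires a mask of 256, so the dtype is
            # promoted to uint16.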
flag_masks = np.array(self._ds[qc_var_name].attrs['flag_masks'])
mask_dtype = flag_masks.dtype
if not np.issubdtype(mask_dtype, np.integer):
mask_dtype = np.uint32
if np.iinfo(mask_dtype).max - set_bit(0, test_number) <= -1:
if mask_dtype == np.int8 or mask_dtype == np.uint8:
mask_dtype = np.uint16
elif mask_dtype == np.int16 or mask_dtype == np.uint16:
mask_dtype = np.uint32
elif mask_dtype == np.int32 or mask_dtype == np.uint32:
mask_dtype = np.uint64
flag_masks = flag_masks.astype(mask_dtype)
flag_masks = np.append(flag_masks, np.array(set_bit(0, test_number), dtype=mask_dtype))
self._ds[qc_var_name].attrs['flag_masks'] = list(flag_masks)
try:
self._ds[qc_var_name].attrs['flag_meanings'].append(test_meaning)
except KeyError:
self._ds[qc_var_name].attrs['flag_meanings'] = [test_meaning]
try:
self._ds[qc_var_name].attrs['flag_assessments'].append(test_assessment)
except KeyError:
self._ds[qc_var_name].attrs['flag_assessments'] = [test_assessment]
test_dict['test_number'] = test_number
test_dict['test_meaning'] = test_meaning
test_dict['test_assessment'] = test_assessment
test_dict['qc_variable_name'] = qc_var_name
test_dict['variable_name'] = var_name
return test_dict
def remove_test(
self,
var_name=None,
test_number=None,
qc_var_name=None,
flag_value=False,
flag_values_reset_value=0,
):
"""
Method to remove a test/filter from a quality control variable. Must set
var_name or qc_var_name.
Parameters
----------
var_name : str or None
Data variable name.
test_number : int
Test number to remove.
qc_var_name : str or None
Quality control variable name. Ignored if var_name is set.
flag_value : boolean
Switch to use flag_values integer quality control.
flag_values_reset_value : int
Value to use when resetting a flag_values value to not be set.
Examples
--------
.. code-block:: python
ds.qcfilter.remove_test(var_name, test_number=3)
"""
if test_number is None:
raise ValueError(
'You need to provide a value for test_number '
'keyword when calling the remove_test() method'
)
if var_name is None and qc_var_name is None:
raise ValueError(
'You need to provide a value for var_name or qc_var_name '
'keyword when calling the remove_test() method'
)
if var_name is not None:
qc_var_name = self._ds.qcfilter.check_for_ancillary_qc(var_name)
# Determine which index is using the test number
index = None
if flag_value:
flag_values = self._ds[qc_var_name].attrs['flag_values']
for ii, flag_num in enumerate(flag_values):
if flag_num == test_number:
index = ii
break
else:
flag_masks = self._ds[qc_var_name].attrs['flag_masks']
for ii, bit_num in enumerate(flag_masks):
if parse_bit(bit_num)[0] == test_number:
index = ii
break
# If can't find the index of test return before doing anything.
if index is None:
return
if flag_value:
remove_index = self._ds.qcfilter.get_qc_test_mask(
var_name=var_name,
qc_var_name=qc_var_name,
test_number=test_number,
return_index=True,
flag_value=True,
)
self._ds.qcfilter.unset_test(
var_name=var_name,
qc_var_name=qc_var_name,
index=remove_index,
test_number=test_number,
flag_value=flag_value,
flag_values_reset_value=flag_values_reset_value,
)
del flag_values[index]
self._ds[qc_var_name].attrs['flag_values'] = flag_values
else:
remove_index = self._ds.qcfilter.get_qc_test_mask(
var_name=var_name,
qc_var_name=qc_var_name,
test_number=test_number,
return_index=True,
)
self._ds.qcfilter.unset_test(
var_name=var_name,
qc_var_name=qc_var_name,
index=remove_index,
test_number=test_number,
flag_value=flag_value,
)
if isinstance(flag_masks, list):
del flag_masks[index]
else:
flag_masks = np.delete(flag_masks, index)
self._ds[qc_var_name].attrs['flag_masks'] = flag_masks
flag_meanings = self._ds[qc_var_name].attrs['flag_meanings']
del flag_meanings[index]
self._ds[qc_var_name].attrs['flag_meanings'] = flag_meanings
flag_assessments = self._ds[qc_var_name].attrs['flag_assessments']
del flag_assessments[index]
self._ds[qc_var_name].attrs['flag_assessments'] = flag_assessments
def set_test(self, var_name, index=None, test_number=None, flag_value=False):
"""
Method to set a test/filter in a quality control variable.
Parameters
----------
var_name : str
Data variable name.
index : int or list or numpy array
            Index to set the test in the quality control array. To set the
            test on all values, pass in an index covering all values.
test_number : int
Test number to set.
flag_value : boolean
Switch to use flag_values integer quality control.
Examples
--------
.. code-block:: python
index = [0, 1, 2, 30]
ds.qcfilter.set_test(var_name, index=index, test_number=2)
"""
qc_var_name = self._ds.qcfilter.check_for_ancillary_qc(var_name)
qc_variable = np.array(self._ds[qc_var_name].values)
# Ensure the qc_variable data type is integer. This ensures bitwise comparison
# will not cause an error.
if qc_variable.dtype.kind not in np.typecodes['AllInteger']:
qc_variable = qc_variable.astype(int)
# Determine if test number is too large for current data type. If so
# up convert data type.
dtype = qc_variable.dtype
if np.iinfo(dtype).max - set_bit(0, test_number) < -1:
if dtype == np.int8:
dtype = np.int16
elif dtype == np.int16:
dtype = np.int32
elif dtype == np.int32:
dtype = np.int64
qc_variable = qc_variable.astype(dtype)
if index is not None:
if flag_value:
qc_variable[index] = test_number
else:
qc_variable[index] = set_bit(qc_variable[index], test_number)
self._ds[qc_var_name].values = qc_variable
def unset_test(
self,
var_name=None,
qc_var_name=None,
index=None,
test_number=None,
flag_value=False,
flag_values_reset_value=0,
):
"""
Method to unset a test/filter from a quality control variable.
Parameters
----------
var_name : str or None
Data variable name.
qc_var_name : str or None
Quality control variable name. Ignored if var_name is set.
index : int or list or numpy array
            Index to unset the test in the quality control array. To unset the
            test on all values, pass in an index covering all values.
test_number : int
Test number to remove.
flag_value : boolean
Switch to use flag_values integer quality control.
flag_values_reset_value : int
Value to use when resetting a flag_values value to not be set.
Examples
--------
.. code-block:: python
ds.qcfilter.unset_test(var_name, index=range(10, 100), test_number=2)
"""
if index is None:
return
if var_name is None and qc_var_name is None:
raise ValueError(
'You need to provide a value for var_name or qc_var_name '
'keyword when calling the unset_test() method'
)
if var_name is not None:
qc_var_name = self._ds.qcfilter.check_for_ancillary_qc(var_name)
# Get QC variable
qc_variable = self._ds[qc_var_name].values
# Ensure the qc_variable data type is integer. This ensures bitwise comparison
# will not cause an error.
if qc_variable.dtype.kind not in np.typecodes['AllInteger']:
qc_variable = qc_variable.astype(int)
if flag_value:
qc_variable[index] = flag_values_reset_value
else:
qc_variable[index] = unset_bit(qc_variable[index], test_number)
self._ds[qc_var_name].values = qc_variable
def available_bit(self, qc_var_name, recycle=False):
"""
Method to determine next available bit or flag to use with a QC test.
Will check for flag_masks first and if not found will check for
flag_values. This will drive how the next value is chosen.
Parameters
----------
qc_var_name : str
Quality control variable name.
recycle : boolean
Option to look for a bit (test) not in use starting from 1.
If a test is not defined will return the lowest number, else
will just use next highest number.
Returns
-------
test_num : int
Next available test number.
Examples
--------
.. code-block:: python
from act.tests import EXAMPLE_METE40
from act.io.armfiles import read_netcdf
ds = read_netcdf(EXAMPLE_METE40, cleanup_qc=True)
test_number = ds.qcfilter.available_bit('qc_atmos_pressure')
print(test_number)
"""
try:
flag_masks = self._ds[qc_var_name].attrs['flag_masks']
flag_value = False
except KeyError:
try:
flag_masks = self._ds[qc_var_name].attrs['flag_values']
flag_value = True
except KeyError:
try:
self._ds[qc_var_name].attrs['flag_values']
flag_masks = self._ds[qc_var_name].attrs['flag_masks']
flag_value = False
except KeyError:
raise ValueError(
'Problem getting next value from '
'available_bit(). flag_values and '
'flag_masks not set as expected'
)
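        # For example, with flag_masks [1, 2, 8] the highest used test is 4, so
        # the next bit (test number) is 5; with recycle=True the unused test 3
        # is returned instead.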
if flag_masks == []:
next_bit = 1
else:
if flag_value:
if recycle:
next_bit = min(set(range(1, 100000)) - set(flag_masks))
else:
next_bit = max(flag_masks) + 1
else:
if recycle:
tests = [parse_bit(mask)[0] for mask in flag_masks]
next_bit = min(set(range(1, 63)) - set(tests))
else:
next_bit = parse_bit(max(flag_masks))[0] + 1
return int(next_bit)
def get_qc_test_mask(
self,
var_name=None,
test_number=None,
qc_var_name=None,
flag_value=False,
return_index=False,
):
"""
Returns a numpy array of False or True where a particular
flag or bit is set in a numpy array. Must set var_name or qc_var_name
when calling.
Parameters
----------
var_name : str or None
Data variable name.
test_number : int
Test number to return array where test is set.
qc_var_name : str or None
Quality control variable name. Ignored if var_name is set.
flag_value : boolean
Switch to use flag_values integer quality control.
return_index : boolean
Return a numpy array of index numbers into QC array where the
test is set instead of False or True mask.
Returns
-------
test_mask : numpy bool array or numpy integer array
A numpy boolean array with False or True where the test number or
bit was set, or numpy integer array of indexes where test is True.
Examples
--------
.. code-block:: python
from act.io.armfiles import read_netcdf
from act.tests import EXAMPLE_IRT25m20s
ds = read_netcdf(EXAMPLE_IRT25m20s)
var_name = "inst_up_long_dome_resist"
result = ds.qcfilter.add_test(
var_name, index=[0, 1, 2], test_meaning="Birds!"
)
qc_var_name = result["qc_variable_name"]
mask = ds.qcfilter.get_qc_test_mask(
var_name, result["test_number"], return_index=True
)
print(mask)
array([0, 1, 2])
mask = ds.qcfilter.get_qc_test_mask(var_name, result["test_number"])
print(mask)
array([True, True, True, ..., False, False, False])
data = ds[var_name].values
print(data[mask])
array([7.84, 7.8777, 7.8965], dtype=float32)
import numpy as np
data[mask] = np.nan
print(data)
array([nan, nan, nan, ..., 7.6705, 7.6892, 7.6892], dtype=float32)
"""
if var_name is None and qc_var_name is None:
raise ValueError(
'You need to provide a value for var_name or qc_var_name '
'keyword when calling the get_qc_test_mask() method'
)
if test_number is None:
raise ValueError(
'You need to provide a value for test_number '
'keyword when calling the get_qc_test_mask() method'
)
if var_name is not None:
qc_var_name = self._ds.qcfilter.check_for_ancillary_qc(var_name)
qc_variable = self._ds[qc_var_name].values
# Ensure the qc_variable data type is integer. This ensures bitwise comparison
# will not cause an error.
if qc_variable.dtype.kind not in np.typecodes['AllInteger']:
qc_variable = qc_variable.astype(int)
if flag_value:
tripped = qc_variable == test_number
else:
check_bit = set_bit(0, test_number) & qc_variable
tripped = check_bit > 0
test_mask = np.full(qc_variable.shape, False, dtype='bool')
# Make sure test_mask is an array. If qc_variable is scalar will
        # be returned from np.zeros as a scalar.
test_mask = np.atleast_1d(test_mask)
test_mask[tripped] = True
test_mask = np.ma.make_mask(test_mask, shrink=False)
if return_index:
test_mask = np.where(test_mask)[0]
return test_mask
def get_masked_data(
self,
var_name,
rm_assessments=None,
rm_tests=None,
return_nan_array=False,
ma_fill_value=None,
return_inverse=False,
):
"""
Returns a numpy masked array containing data and mask or
a numpy float array with masked values set to NaN.
Parameters
----------
var_name : str
Data variable name.
rm_assessments : str or list of str
Assessment name to exclude from returned data.
rm_tests : int or list of int
Test numbers to exclude from returned data. This is the test
number (or bit position number) not the mask number.
return_nan_array : boolean
            Return a numpy array with filtered (or masked) values
            set to the numpy NaN value. If the data is type int it will be
            upconverted to numpy float to allow setting the NaN value.
ma_fill_value : int or float (or str?)
            The numpy masked array fill_value used in creation of the
masked array. If the datatype needs to be upconverted to allow
the fill value to be used, data will be upconverted.
return_inverse : boolean
            Invert the masked array mask so values passing the tests are masked
            and values failing the tests are not. Useful for overplotting
            the values that are failing the tests.
Returns
-------
variable : numpy masked array or numpy float array
Default is to return a numpy masked array with the mask set to
True where the test with requested assessment or test number
was found set.
If return_nan_array is True will return numpy array upconverted
to float with locations where the test with requested assessment
or test number was found set converted to NaN.
Examples
--------
.. code-block:: python
from act.io.armfiles import read_netcdf
from act.tests import EXAMPLE_IRT25m20s
ds = read_netcdf(EXAMPLE_IRT25m20s)
var_name = "inst_up_long_dome_resist"
result = ds.qcfilter.add_test(
var_name, index=[0, 1, 2], test_meaning="Birds!"
)
data = ds.qcfilter.get_masked_data(
var_name, rm_assessments=["Bad", "Indeterminate"]
)
print(data)
masked_array(
data=[..., 7.670499801635742, 7.689199924468994, 7.689199924468994],
mask=[..., False, False, False],
fill_value=1e20,
dtype=float32,
)
"""
qc_var_name = self._ds.qcfilter.check_for_ancillary_qc(var_name, add_if_missing=False)
flag_value = False
flag_values = None
flag_masks = None
flag_assessments = None
try:
flag_assessments = self._ds[qc_var_name].attrs['flag_assessments']
flag_masks = self._ds[qc_var_name].attrs['flag_masks']
except KeyError:
pass
try:
flag_values = self._ds[qc_var_name].attrs['flag_values']
flag_value = True
except KeyError:
pass
test_numbers = []
if rm_tests is not None:
if isinstance(rm_tests, (int, float, str)):
rm_tests = [int(rm_tests)]
test_numbers.extend(rm_tests)
if rm_assessments is not None:
if isinstance(rm_assessments, str):
rm_assessments = [rm_assessments]
if flag_masks is not None:
test_nums = [parse_bit(mask)[0] for mask in flag_masks]
if flag_values is not None:
test_nums = flag_values
rm_assessments = [x.lower() for x in rm_assessments]
if flag_assessments is not None:
for ii, assessment in enumerate(flag_assessments):
if assessment.lower() in rm_assessments:
test_numbers.append(test_nums[ii])
# Make the list of test numbers to mask unique
test_numbers = list(set(test_numbers))
# Create mask of indexes by looking where each test is set
variable = self._ds[var_name].values
nan_dtype = np.float32
if variable.dtype in (np.float64, np.int64):
nan_dtype = np.float64
mask = np.zeros(variable.shape, dtype=bool)
for test in test_numbers:
mask = mask | self._ds.qcfilter.get_qc_test_mask(var_name, test, flag_value=flag_value)
# Convert data numpy array into masked array
try:
variable = np.ma.array(variable, mask=mask, fill_value=ma_fill_value)
except TypeError:
variable = np.ma.array(
variable,
mask=mask,
fill_value=ma_fill_value,
dtype=np.array(ma_fill_value).dtype,
)
# If requested switch array from where data is not failing tests
# to where data is failing tests. This can be used when over plotting
        # where the data is failing the tests.
if return_inverse:
mask = variable.mask
mask = np.invert(mask)
variable.mask = mask
# If asked to return numpy array with values set to NaN
if return_nan_array:
variable = variable.astype(nan_dtype)
variable = variable.filled(fill_value=np.nan)
return variable
def datafilter(
self,
variables=None,
rm_assessments=None,
rm_tests=None,
verbose=False,
del_qc_var=True,
):
"""
Method to apply quality control variables to data variables by
changing the data values in the dataset using quality control variables.
The data is updated with failing data set to
NaN. This can be used to update the data variable in the xarray
dataset for use with xarray methods to perform analysis on the data
since those methods don't read the quality control variables.
Parameters
----------
variables : None or str or list of str
Data variable names to process. If set to None will update all
data variables.
rm_assessments : str or list of str
            Assessment names listed under quality control variable flag_assessments
to exclude from returned data. Examples include
['Bad', 'Incorrect', 'Indeterminate', 'Suspect']
rm_tests : int or list of int
Test numbers listed under quality control variable to exclude from
returned data. This is the test
number (or bit position number) not the mask number.
verbose : boolean
Print processing information.
del_qc_var : boolean
Option to delete quality control variable after processing. Since
the data values can not be determined after they are set to NaN
and xarray method processing would also process the quality control
variables, the default is to remove the quality control data
variables.
Examples
--------
.. code-block:: python
from act.io.armfiles import read_netcdf
from act.tests import EXAMPLE_MET1
ds = read_netcdf(EXAMPLE_MET1)
ds.clean.cleanup()
var_name = "atmos_pressure"
ds_1 = ds.nanmean()
ds.qcfilter.add_less_test(var_name, 99, test_assessment="Bad")
ds.qcfilter.datafilter(rm_assessments="Bad")
ds_2 = ds.nanmean()
print("All_data =", ds_1[var_name].values)
All_data = 98.86098
print("Bad_Removed =", ds_2[var_name].values)
Bad_Removed = 99.15148
"""
if rm_assessments is None and rm_tests is None:
raise ValueError('Need to set rm_assessments or rm_tests option')
if variables is not None and isinstance(variables, str):
variables = [variables]
if variables is None:
variables = list(self._ds.data_vars)
for var_name in variables:
qc_var_name = self.check_for_ancillary_qc(var_name, add_if_missing=False, cleanup=False)
if qc_var_name is None:
if verbose:
if var_name in ['base_time', 'time_offset']:
continue
try:
if self._ds[var_name].attrs['standard_name'] == 'quality_flag':
continue
except KeyError:
pass
print(f'No quality control variable for {var_name} found '
f'in call to .qcfilter.datafilter()')
continue
# Need to return data as Numpy array with NaN values. Setting the Dask array
# to Numpy masked array does not work with other tools.
data = self.get_masked_data(
var_name,
rm_assessments=rm_assessments,
rm_tests=rm_tests,
return_nan_array=True
)
            # If data was originally stored as Dask array return values to Dataset as Dask array
# else set as Numpy array.
try:
self._ds[var_name].data = dask.array.from_array(
data, chunks=self._ds[var_name].data.chunksize)
except AttributeError:
self._ds[var_name].values = data
# If requested delete quality control variable
if del_qc_var:
del self._ds[qc_var_name]
if verbose:
print(f'Deleting {qc_var_name} from dataset')
def set_bit(array, bit_number):
"""
Function to set a quality control bit given a scalar or
array of values and a bit number.
Parameters
----------
    array : int, list of int, or numpy array of int
The bitpacked array to set the bit number.
bit_number : int
The bit (or test) number to set starting at 1.
Returns
-------
array : int, numpy array, tuple, list
Integer or numpy array with bit set for each element of the array.
Returned in same type.
Examples
--------
Example use setting bit 2 to an array called data:
.. code-block:: python
from act.qc.qcfilter import set_bit
data = np.array(range(0, 7))
data = set_bit(data, 2)
print(data)
array([2, 3, 2, 3, 6, 7, 6])
"""
was_list = False
was_tuple = False
if isinstance(array, list):
array = np.array(array)
was_list = True
if isinstance(array, tuple):
array = np.array(array)
was_tuple = True
if bit_number > 0:
array |= 1 << bit_number - 1
if was_list:
array = list(array)
if was_tuple:
array = tuple(array)
return array
def unset_bit(array, bit_number):
"""
Function to remove a quality control bit given a
scalar or array of values and a bit number.
Parameters
----------
    array : int, list of int, or numpy array
Array of integers containing bit packed numbers.
bit_number : int
Bit number to remove starting at 1.
Returns
-------
array : int or numpy array
Returns same data type as array entered with bit removed. Will
fail gracefully if the bit requested to be removed was not set.
Examples
--------
.. code-block:: python
from act.qc.qcfilter import set_bit, unset_bit
data = set_bit([0, 1, 2, 3, 4], 2)
data = set_bit(data, 3)
print(data)
[6, 7, 6, 7, 6]
data = unset_bit(data, 2)
print(data)
[4, 5, 4, 5, 4]
"""
was_list = False
was_tuple = False
if isinstance(array, list):
array = np.array(array)
was_list = True
if isinstance(array, tuple):
array = np.array(array)
was_tuple = True
if bit_number > 0:
array &= ~(1 << bit_number - 1)
if was_list:
array = list(array)
if was_tuple:
array = tuple(array)
return array
def parse_bit(qc_bit):
"""
Given a single integer value, return bit positions.
Parameters
----------
qc_bit : int or numpy int
Bit packed integer number to be parsed.
Returns
-------
bit_number : numpy.int32 array
Array containing all bit numbers of the bit packed number.
If no bits set returns empty array.
Examples
--------
.. code-block:: python
from act.qc.qcfilter import parse_bit
parse_bit(7)
array([1, 2, 3], dtype=int32)
"""
if isinstance(qc_bit, (list, tuple, np.ndarray)):
if len(qc_bit) > 1:
raise ValueError('Must be a single value.')
qc_bit = qc_bit[0]
if qc_bit < 0:
raise ValueError('Must be a positive integer.')
# Convert integer value to single element numpy array of type unsigned integer 64
value = np.array([qc_bit]).astype(">u8")
# Convert value to view containing only unsigned integer 8 data type. This
# is required for the numpy unpackbits function which only works with
# unsigned integer 8 bit data type.
value = value.view("u1")
# Unpack bits using numpy into array of 1 where bit is set and convert into boolean array
index = np.unpackbits(value).astype(bool)
# Create range of numbers from 64 to 1 and subset where unpackbits found a bit set.
bit_number = np.arange(index.size, 0, -1)[index]
# Flip the array to increasing numbers to match historical method
bit_number = np.flip(bit_number)
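    # For example, parse_bit(6) returns array([2, 3]) since 6 = 0b110 has
    # bits 2 and 3 set.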
# bit_number = []
# qc_bit = int(qc_bit)
# counter = 0
# while qc_bit > 0:
# temp_value = qc_bit % 2
# qc_bit = qc_bit >> 1
# counter += 1
# if temp_value == 1:
# bit_number.append(counter)
# Convert data type into expected type
bit_number = np.asarray(bit_number, dtype=np.int32)
return bit_number
<|code_end|>
|
act/io/armfiles.py
<|code_start|>"""
This module contains I/O operations for loading files that were created for the
Atmospheric Radiation Measurement program supported by the Department of Energy
Office of Science.
"""
import copy
import glob
import json
import re
import urllib
import warnings
from pathlib import Path, PosixPath
from netCDF4 import Dataset
from os import PathLike
import tarfile
import tempfile
import numpy as np
import xarray as xr
import datetime as dt
import act
import act.utils as utils
from act.config import DEFAULT_DATASTREAM_NAME
from act.utils.io_utils import unpack_tar, unpack_gzip, cleanup_files, is_gunzip_file
def read_netcdf(
filenames,
concat_dim=None,
return_None=False,
combine='by_coords',
decode_times=True,
use_cftime=True,
use_base_time=False,
combine_attrs='override',
cleanup_qc=False,
keep_variables=None,
**kwargs,
):
"""
Returns `xarray.Dataset` with stored data and metadata from a user-defined
query of ARM-standard netCDF files from a single datastream. Has some procedures
    to ensure time is correctly formatted in the returned Dataset.
Parameters
----------
filenames : str, pathlib.PosixPath, list of str, list of pathlib.PosixPath
Name of file(s) to read.
concat_dim : str
Dimension to concatenate files along.
return_None : boolean
Catch IOError exception when file not found and return None.
Default is False.
combine : str
String used by xarray.open_mfdataset() to determine how to combine
data files into one Dataset. See Xarray documentation for options.
decode_times : boolean
Standard Xarray option to decode time values from int/float to python datetime values.
Appears the default is to do this anyway but need this option to allow correct usage
of use_base_time.
use_cftime : boolean
Option to use cftime library to parse the time units string and correctly
establish the time values with a units string containing timezone offset.
This is used because the Pandas units string parser does not correctly recognize
time zone offset. Code will automatically detect cftime object and convert to datetime64
in returned Dataset.
use_base_time : boolean
Option to use ARM time variables base_time and time_offset. Useful when the time variable
is not included (older files) or when the units attribute is incorrectly formatted. Will use
the values of base_time and time_offset as seconds since epoch and create datetime64 values
for time coordinate. If set will change decode_times and use_cftime to False.
combine_attrs : str
String indicating how to combine attrs of the datasets being merged
cleanup_qc : boolean
Call clean.cleanup() method to convert to standardized ancillary quality control
variables. This will not allow any keyword options, so if non-default behavior is
desired will need to call clean.cleanup() method on the dataset after reading the data.
keep_variables : str or list of str
Variable names to read from data file. Works by creating a list of variable names
to exclude from reading and passing into open_mfdataset() via drop_variables keyword.
Still allows use of drop_variables keyword for variables not listed in first file to
read.
**kwargs : keywords
Keywords to pass through to xarray.open_mfdataset().
Returns
-------
ds : xarray.Dataset (or None)
ACT Xarray dataset (or None if no data file(s) found).
Examples
--------
This example will load the example sounding data used for unit testing.
.. code-block :: python
import act
ds = act.io.armfiles.read_netcdf(act.tests.sample_files.EXAMPLE_SONDE_WILDCARD)
print(ds)
"""
ds = None
filenames, cleanup_temp_directory = check_if_tar_gz_file(filenames)
file_dates = []
file_times = []
# If requested to use base_time and time_offset, set keywords to correct attribute values
# to pass into xarray open_mfdataset(). Need to turn off decode_times and use_cftime
# or else will try to convert base_time and time_offset. Depending on values of attributes
# may cause a failure.
if use_base_time:
decode_times = False
use_cftime = False
    # Add function keywords to kwargs dictionary for passing into open_mfdataset.
kwargs['combine'] = combine
kwargs['concat_dim'] = concat_dim
kwargs['decode_times'] = decode_times
kwargs['use_cftime'] = use_cftime
if len(filenames) > 1 and not isinstance(filenames, str):
kwargs['combine_attrs'] = combine_attrs
# Check if keep_variables is set. If so determine correct drop_variables
if keep_variables is not None:
drop_variables = None
if 'drop_variables' in kwargs.keys():
drop_variables = kwargs['drop_variables']
kwargs['drop_variables'] = keep_variables_to_drop_variables(
filenames, keep_variables, drop_variables=drop_variables)
# Create an exception tuple to use with try statements. Doing it this way
# so we can add the FileNotFoundError if requested. Can add more error
# handling in the future.
except_tuple = (ValueError,)
if return_None:
except_tuple = except_tuple + (FileNotFoundError, OSError)
try:
# Read data file with Xarray function
ds = xr.open_mfdataset(filenames, **kwargs)
except except_tuple as exception:
# If requested return None for File not found error
if type(exception).__name__ == 'FileNotFoundError':
return None
# If requested return None for File not found error
if type(exception).__name__ == 'OSError' and exception.args[0] == 'no files to open':
return None
# Look at error message and see if could be nested error message. If so
# update combine keyword and try again. This should allow reading files
# without a time variable but base_time and time_offset variables.
if (
kwargs['combine'] != 'nested'
and type(exception).__name__ == 'ValueError'
and exception.args[0] == 'Could not find any dimension coordinates '
'to use to order the datasets for concatenation'
):
kwargs['combine'] = 'nested'
ds = xr.open_mfdataset(filenames, **kwargs)
else:
            # When all else fails raise the original exception
raise exception
# If requested use base_time and time_offset to derive time. Assumes that the units
# of both are in seconds and that the value is number of seconds since epoch.
if use_base_time:
time = (ds['base_time'].values + ds['time_offset'].values) * 1000000.0
time = np.array(time, dtype='datetime64[us]')
# Need to use a new Dataset creation to correctly index time for use with
# .group and .resample methods in Xarray Datasets.
temp_ds = xr.Dataset({'time': (ds['time'].dims, time, ds['time'].attrs)})
ds['time'] = temp_ds['time']
del temp_ds
for att_name in ['units', 'ancillary_variables']:
try:
del ds['time'].attrs[att_name]
except KeyError:
pass
# Xarray has issues reading a CF formatted time units string if it contains
    # a timezone offset without a [+|-] preceding the timezone offset.
# https://github.com/pydata/xarray/issues/3644
# To ensure the times are read in correctly need to set use_cftime=True.
# This will read in time as cftime object. But Xarray uses numpy datetime64
# natively. This will convert the cftime time values to numpy datetime64. cftime
# does not preserve the time past ms precision. We will use ms precision for
# the conversion.
desired_time_precision = 'datetime64[ms]'
for var_name in ['time', 'time_offset']:
try:
if 'time' in ds.dims and type(ds[var_name].values[0]).__module__.startswith('cftime.'):
# If we just convert time to datetime64 the group, sel, and other Xarray
# methods will not work correctly because time is not indexed. Need to
# use the formation of a Dataset to correctly set the time indexing.
temp_ds = xr.Dataset(
{
var_name: (
ds[var_name].dims,
ds[var_name].values.astype(desired_time_precision),
ds[var_name].attrs,
)
}
)
ds[var_name] = temp_ds[var_name]
del temp_ds
# If time_offset is in file try to convert base_time as well
if var_name == 'time_offset':
ds['base_time'].values = ds['base_time'].values.astype(desired_time_precision)
ds['base_time'] = ds['base_time'].astype(desired_time_precision)
except KeyError:
pass
# Check if "time" variable is not in the netCDF file. If so try to use
# base_time and time_offset to make time variable. Basically a fix for incorrectly
# formatted files. May require using decode_times=False to initially read the data.
if 'time' in ds.dims and not np.issubdtype(ds['time'].dtype, np.datetime64):
try:
ds['time'] = ds['time_offset']
except (KeyError, ValueError):
pass
# Adding support for wildcards
if isinstance(filenames, str):
filenames = glob.glob(filenames)
elif isinstance(filenames, PosixPath):
filenames = [filenames]
# Get file dates and times that were read in to the dataset
filenames.sort()
for f in filenames:
f = Path(f).name
pts = re.match(r'(^[a-zA-Z0-9]+)\.([0-9a-z]{2})\.([\d]{8})\.([\d]{6})\.([a-z]{2,3}$)', f)
# If Not ARM format, read in first time for info
if pts is not None:
pts = pts.groups()
file_dates.append(pts[2])
file_times.append(pts[3])
else:
if ds['time'].size > 1:
dummy = ds['time'].values[0]
else:
dummy = ds['time'].values
file_dates.append(utils.numpy_to_arm_date(dummy))
file_times.append(utils.numpy_to_arm_date(dummy, returnTime=True))
# Add attributes
ds.attrs['_file_dates'] = file_dates
ds.attrs['_file_times'] = file_times
is_arm_file_flag = check_arm_standards(ds)
    # Ensure that we have _datastream set whether or not there's
# a datastream attribute already.
if is_arm_file_flag == 0:
ds.attrs['_datastream'] = DEFAULT_DATASTREAM_NAME
else:
ds.attrs['_datastream'] = ds.attrs['datastream']
ds.attrs['_arm_standards_flag'] = is_arm_file_flag
if cleanup_qc:
ds.clean.cleanup()
if cleanup_temp_directory:
cleanup_files(files=filenames)
return ds
def keep_variables_to_drop_variables(
filenames,
keep_variables,
drop_variables=None):
"""
Returns a list of variable names to exclude from reading by passing into
`Xarray.open_dataset` drop_variables keyword. This can greatly help reduce
loading time and disk space use of the Dataset.
When passed a netCDF file name, will open the file using the netCDF4 library to get
    list of variable names. There is less overhead reading the variable names using
netCDF4 library than Xarray. If more than one filename is provided or string is
used for shell syntax globbing, will use the first file in the list.
Parameters
----------
filenames : str, pathlib.PosixPath or list of str
Name of file(s) to read.
keep_variables : str or list of str
        Variable names desired to keep. Do not need to list associated dimension
names. These will be automatically kept as well.
drop_variables : str or list of str
Variable names to explicitly add to returned list. May be helpful if a variable
exists in a file that is not in the first file in the list.
Returns
-------
drop_vars : list of str
Variable names to exclude from returned Dataset by using drop_variables keyword
when calling Xarray.open_dataset().
Examples
--------
.. code-block :: python
import act
filename = '/data/datastream/hou/houkasacrcfrM1.a1/houkasacrcfrM1.a1.20220404.*.nc'
drop_vars = act.io.armfiles.keep_variables_to_drop_variables(
filename, ['lat','lon','alt','crosspolar_differential_phase'],
drop_variables='variable_name_that_only_exists_in_last_file_of_the_day')
"""
read_variables = []
return_variables = []
if isinstance(keep_variables, str):
keep_variables = [keep_variables]
if isinstance(drop_variables, str):
drop_variables = [drop_variables]
# If filenames is a list subset to first file name.
if isinstance(filenames, (list, tuple)):
filename = filenames[0]
# If filenames is a string, check if it needs to be expanded in shell
# first. Then use first returned file name. Else use the string filename.
elif isinstance(filenames, str):
filename = glob.glob(filenames)
if len(filename) == 0:
return return_variables
else:
filename.sort()
filename = filename[0]
# Use netCDF4 library to extract the variable and dimension names.
rootgrp = Dataset(filename, 'r')
read_variables = list(rootgrp.variables)
dimensions = list(rootgrp.dimensions)
    # Loop over the variables to exclude needed coordinate dimension names.
dims_to_keep = []
for var_name in keep_variables:
try:
dims_to_keep.extend(list(rootgrp[var_name].dimensions))
except IndexError:
pass
rootgrp.close()
    # Remove names not matching keep_variables, excluding the associated coordinate dimensions
return_variables = set(read_variables) - set(keep_variables) - set(dims_to_keep)
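    # For example, file variables {'time', 'temp_mean', 'rh_mean'} with
    # keep_variables ['temp_mean'] and dims_to_keep ['time'] leaves
    # {'rh_mean'} to drop.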
# Add drop_variables to list
if drop_variables is not None:
return_variables = set(return_variables) | set(drop_variables)
return list(return_variables)
def check_arm_standards(ds):
"""
Checks to see if an xarray dataset conforms to ARM standards.
Parameters
----------
ds : Xarray Dataset
The dataset to check.
Returns
-------
flag : int
The flag corresponding to whether or not the file conforms
to ARM standards. Bit packed, so 0 for no, 1 for yes
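    Examples
    --------
    A minimal sketch using one of the ACT sample files; any ARM-style
    netCDF file read with read_netcdf() would behave the same way.
    .. code-block :: python
        from act.io.armfiles import read_netcdf, check_arm_standards
        from act.tests import EXAMPLE_MET1
        ds = read_netcdf(EXAMPLE_MET1)
        flag = check_arm_standards(ds)
        print(flag)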
"""
the_flag = 1 << 0
if 'datastream' not in ds.attrs.keys():
the_flag = 0
# Check if the historical global attribute name is
# used instead of updated name of 'datastream'. If so
# correct the global attributes and flip flag.
if 'zeb_platform' in ds.attrs.keys():
ds.attrs['datastream'] = copy.copy(ds.attrs['zeb_platform'])
del ds.attrs['zeb_platform']
the_flag = 1 << 0
return the_flag
def create_ds_from_arm_dod(proc, set_dims, version='', fill_value=-9999.0, scalar_fill_dim=None, local_file=False):
"""
Queries the ARM DOD api and builds a dataset based on the ARM DOD and
the dimension sizes that are passed in.
Parameters
----------
proc : string
Process to create the dataset off of. This is normally in the
format of inst.level. i.e. vdis.b1 or kazrge.a1. If local file
is true, this points to the path of the .dod file.
set_dims : dict
Dictionary of dims from the DOD and the corresponding sizes.
Time is required. Code will try and pull from DOD, unless set
through this variable
Note: names need to match exactly what is in the dod
i.e. {'drop_diameter': 50, 'time': 1440}
version : string
Version number of the ingest to use. If not set, defaults to
latest version
fill_value : float
Fill value for non-dimension variables. Dimensions cannot have
duplicate values and are incrementally set (0, 1, 2)
scalar_fill_dim : str
Depending on how the dataset is set up, sometimes the scalar values
are dimensioned to the main dimension. i.e. a lat/lon is set to have
a dimension of time. This is a way to set it up similarly.
local_file: bool
If true, the DOD will be loaded from a file whose name is proc.
If false, the DOD will be pulled from PCM.
Returns
-------
ds : xarray.Dataset
ACT Xarray dataset populated with all variables and attributes.
Examples
--------
.. code-block :: python
dims = {'time': 1440, 'drop_diameter': 50}
ds = act.io.armfiles.create_ds_from_arm_dod(
'vdis.b1', dims, version='1.2', scalar_fill_dim='time')
"""
# Set base url to get DOD information
if local_file is False:
base_url = 'https://pcm.arm.gov/pcm/api/dods/'
# Get data from DOD api
with urllib.request.urlopen(base_url + proc) as url:
data = json.loads(url.read().decode())
else:
with open(proc) as file:
data = json.loads(file.read())
    # Check version numbers and alert if requested version is not available
keys = list(data['versions'].keys())
if version not in keys:
warnings.warn(
' '.join(
['Version:', version, 'not available or not specified. Using Version:', keys[-1]]
),
UserWarning,
)
version = keys[-1]
# Create empty xarray dataset
ds = xr.Dataset()
# Get the global attributes and add to dataset
atts = {}
for a in data['versions'][version]['atts']:
if a['name'] == 'string':
continue
if a['value'] is None:
a['value'] = ''
atts[a['name']] = a['value']
ds.attrs = atts
# Get variable information and create dataarrays that are
# then added to the dataset
# If not passed in through set_dims, will look to the DOD
# if not set in the DOD, then will raise error
variables = data['versions'][version]['vars']
dod_dims = data['versions'][version]['dims']
for d in dod_dims:
if d['name'] not in list(set_dims.keys()):
if d['length'] > 0:
set_dims[d['name']] = d['length']
else:
raise ValueError(
'Dimension length not set in DOD for '
+ d['name']
+ ', nor passed in through set_dim'
)
for v in variables:
dims = v['dims']
dim_shape = []
# Using provided dimension data, fill array accordingly for easy overwrite
if len(dims) == 0:
if scalar_fill_dim is None:
data_na = fill_value
else:
data_na = np.full(set_dims[scalar_fill_dim], fill_value)
v['dims'] = scalar_fill_dim
else:
for d in dims:
dim_shape.append(set_dims[d])
if len(dim_shape) == 1 and v['name'] == dims[0]:
data_na = np.arange(dim_shape[0])
else:
data_na = np.full(dim_shape, fill_value)
# Get attribute information. Had to do some things to get to print to netcdf
atts = {}
str_flag = False
for a in v['atts']:
if a['name'] == 'string':
str_flag = True
continue
if a['value'] is None:
continue
if str_flag and a['name'] == 'units':
continue
atts[a['name']] = a['value']
da = xr.DataArray(data=data_na, dims=v['dims'], name=v['name'], attrs=atts)
ds[v['name']] = da
return ds
@xr.register_dataset_accessor('write')
class WriteDataset:
"""
Class for cleaning up Dataset before writing to file.
"""
def __init__(self, xarray_ds):
self._ds = xarray_ds
def write_netcdf(
self,
cleanup_global_atts=True,
cleanup_qc_atts=True,
join_char='__',
make_copy=True,
cf_compliant=False,
delete_global_attrs=['qc_standards_version', 'qc_method', 'qc_comment'],
FillValue=-9999,
cf_convention='CF-1.8',
**kwargs,
):
"""
This is a wrapper around Dataset.to_netcdf to clean up the Dataset before
writing to disk. Some things are added to global attributes during ACT reading
process, and QC variables attributes are modified during QC cleanup process.
This will modify before writing to disk to better
match Climate & Forecast standards.
Parameters
----------
cleanup_global_atts : boolean
Option to cleanup global attributes by removing any global attribute
that starts with an underscore.
cleanup_qc_atts : boolean
Option to convert attributes that would be written as string array
            to be a single character string. CF 1.7 does not allow string array attributes.
            Will use a single space as a delimiter between values and join_char to replace
white space between words.
join_char : str
            The character string to use for replacing white spaces between words when converting
a list of strings to single character string attributes.
make_copy : boolean
Make a copy before modifying Dataset to write. For large Datasets this
may add processing time and memory. If modifying the Dataset is OK
try setting to False.
cf_compliant : boolean
            Option to output the file with additional attributes to make the file Climate & Forecast
            compliant. May require running the .clean.cleanup() method on the dataset to fix other
            issues first. This does the best it can, but it may not be truly compliant. You
            should read the CF documents and try to make the dataset compliant before writing to file.
delete_global_attrs : list
Optional global attributes to be deleted. Defaults to some standard
QC attributes that are not needed. Can add more or set to None to not
remove the attributes.
FillValue : int, float
The value to use as a _FillValue in output file. This is used to fix
issues with how Xarray handles missing_value upon reading. It's confusing
so not a perfect fix. Set to None to leave Xarray to do what it wants.
Set to a value to be the value used as _FillValue in the file and data
array. This should then remove missing_value attribute from the file as well.
cf_convention : str
The Climate and Forecast convention string to add to Conventions attribute.
**kwargs : keywords
Keywords to pass through to Dataset.to_netcdf()
Examples
--------
.. code-block :: python
ds.write.write_netcdf(path='output.nc')
"""
if make_copy:
write_ds = copy.deepcopy(self._ds)
else:
write_ds = self._ds
encoding = {}
if cleanup_global_atts:
for attr in list(write_ds.attrs):
if attr.startswith('_'):
del write_ds.attrs[attr]
if cleanup_qc_atts:
check_atts = ['flag_meanings', 'flag_assessments']
for var_name in list(write_ds.data_vars):
if 'standard_name' not in write_ds[var_name].attrs.keys():
continue
for attr_name in check_atts:
try:
att_values = write_ds[var_name].attrs[attr_name]
if isinstance(att_values, (list, tuple)):
att_values = [att_value.replace(' ', join_char) for att_value in att_values]
write_ds[var_name].attrs[attr_name] = ' '.join(att_values)
except KeyError:
pass
# Tell .to_netcdf() to not add a _FillValue attribute for
# quality control variables.
if FillValue is not None:
encoding[var_name] = {'_FillValue': None}
# Clean up _FillValue vs missing_value mess by creating an
# encoding dictionary with each variable's _FillValue set to
# requested fill value. May need to improve upon this for data type
# and other issues in the future.
if FillValue is not None:
skip_variables = ['base_time', 'time_offset', 'qc_time'] + list(encoding.keys())
for var_name in list(write_ds.data_vars):
if var_name not in skip_variables:
encoding[var_name] = {'_FillValue': FillValue}
if delete_global_attrs is not None:
for attr in delete_global_attrs:
try:
del write_ds.attrs[attr]
except KeyError:
pass
for var_name in list(write_ds.keys()):
if 'string' in list(write_ds[var_name].attrs.keys()):
att = write_ds[var_name].attrs['string']
write_ds[var_name].attrs[var_name + '_string'] = att
del write_ds[var_name].attrs['string']
# If requested update global attributes and variables attributes for required
# CF attributes.
if cf_compliant:
# Get variable names and standard name for each variable
var_names = list(write_ds.keys())
standard_names = []
for var_name in var_names:
try:
standard_names.append(write_ds[var_name].attrs['standard_name'])
except KeyError:
standard_names.append(None)
            # Check if time variable has axis and standard_name attribute
coord_name = 'time'
try:
write_ds[coord_name].attrs['axis']
except KeyError:
try:
write_ds[coord_name].attrs['axis'] = 'T'
except KeyError:
pass
try:
write_ds[coord_name].attrs['standard_name']
except KeyError:
try:
write_ds[coord_name].attrs['standard_name'] = 'time'
except KeyError:
pass
            # Try to determine type of dataset by coordinate dimension named time
# and other factors
try:
write_ds.attrs['FeatureType']
except KeyError:
dim_names = list(write_ds.dims)
FeatureType = None
if dim_names == ['time']:
FeatureType = 'timeSeries'
elif len(dim_names) == 2 and 'time' in dim_names and 'bound' in dim_names:
FeatureType = 'timeSeries'
elif len(dim_names) >= 2 and 'time' in dim_names:
for var_name in var_names:
dims = list(write_ds[var_name].dims)
if len(dims) == 2 and 'time' in dims:
prof_dim = list(set(dims) - {'time'})[0]
if write_ds[prof_dim].values.size > 2:
FeatureType = 'timeSeriesProfile'
break
if FeatureType is not None:
write_ds.attrs['FeatureType'] = FeatureType
# Add axis and positive attributes to variables with standard_name
# equal to 'altitude'
alt_variables = [
var_names[ii] for ii, sn in enumerate(standard_names) if sn == 'altitude'
]
for var_name in alt_variables:
try:
write_ds[var_name].attrs['axis']
except KeyError:
write_ds[var_name].attrs['axis'] = 'Z'
try:
write_ds[var_name].attrs['positive']
except KeyError:
write_ds[var_name].attrs['positive'] = 'up'
# Check if the Conventions global attribute lists the CF convention
try:
Conventions = write_ds.attrs['Conventions']
Conventions = Conventions.split()
cf_listed = False
for ii in Conventions:
if ii.startswith('CF-'):
cf_listed = True
break
if not cf_listed:
Conventions.append(cf_convention)
write_ds.attrs['Conventions'] = ' '.join(Conventions)
except KeyError:
write_ds.attrs['Conventions'] = str(cf_convention)
# Reorder global attributes to ensure history is last
try:
history = copy.copy(write_ds.attrs['history'])
del write_ds.attrs['history']
write_ds.attrs['history'] = history
except KeyError:
pass
current_time = dt.datetime.now().replace(microsecond=0)
if 'history' in list(write_ds.attrs.keys()):
write_ds.attrs['history'] += ''.join(['\n', str(current_time), ' created by ACT ', str(act.__version__),
' act.io.write.write_netcdf'])
write_ds.to_netcdf(encoding=encoding, **kwargs)
def check_if_tar_gz_file(filenames):
"""
    Unpacks gunzip and/or TAR files and returns the paths to the extracted files.
...
Parameters
----------
filenames : str, pathlib.Path
Filenames to check if gunzip and/or tar files.
Returns
-------
    filenames : str or list
        Paths to extracted files from gunzip or TAR files.
    cleanup : bool
        True when files were extracted to a temporary directory and should be
        cleaned up after reading.
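    Examples
    --------
    A minimal sketch; the tar file path below is a hypothetical placeholder.
    .. code-block :: python
        from act.io.armfiles import check_if_tar_gz_file, read_netcdf
        from act.utils.io_utils import cleanup_files
        filenames, cleanup = check_if_tar_gz_file('/path/to/sgpmetE13.b1.tar')
        ds = read_netcdf(filenames)
        if cleanup:
            cleanup_files(files=filenames)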
"""
cleanup = False
if isinstance(filenames, (str, PathLike)):
try:
if is_gunzip_file(filenames) or tarfile.is_tarfile(str(filenames)):
tmpdirname = tempfile.mkdtemp()
cleanup = True
if is_gunzip_file(filenames):
filenames = unpack_gzip(filenames, write_directory=tmpdirname)
if tarfile.is_tarfile(str(filenames)):
filenames = unpack_tar(filenames, write_directory=tmpdirname, randomize=False)
except Exception:
pass
return filenames, cleanup
def read_mmcr(filenames):
"""
Reads in ARM MMCR files and splits up the variables into specific
mode variables based on what's in the files. MMCR files have the modes
interleaved and are not readable using xarray so some modifications are
needed ahead of time.
Parameters
----------
filenames : str, pathlib.PosixPath or list of str
Name of file(s) to read.
Returns
-------
ds : xarray.Dataset (or None)
ACT Xarray dataset (or None if no data file(s) found).
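    Examples
    --------
    A minimal sketch; the file list below is a hypothetical placeholder for a
    set of MMCR moments files from one day.
    .. code-block :: python
        import glob
        from act.io.armfiles import read_mmcr
        files = glob.glob('/data/sgpmmcrmomC1.b1/sgpmmcrmomC1.b1.20050101.*.cdf')
        ds = read_mmcr(files)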
"""
# Sort the files to make sure they concatenate right
filenames.sort()
# Run through each file and read it in using netCDF4, then
# read it in with xarray
multi_ds = []
for f in filenames:
nc = Dataset(f, "a")
# Change heights name to range to read appropriately to xarray
if 'heights' in nc.dimensions:
nc.renameDimension('heights', 'range')
if nc is not None:
ds = xr.open_dataset(xr.backends.NetCDF4DataStore(nc))
multi_ds.append(ds)
# Concatenate datasets together
if len(multi_ds) > 1:
ds = xr.concat(multi_ds, dim='time')
else:
ds = multi_ds[0]
    # Get modes and ranges with time/height modes
modes = ds['mode'].values
mode_vars = []
for v in ds:
if 'range' in ds[v].dims and 'time' in ds[v].dims and len(ds[v].dims) == 2:
mode_vars.append(v)
    # For each mode, extract the data variables if available and
    # save them as individual variables in the dataset.
for m in modes:
if len(ds['ModeDescription'].shape) > 1:
mode_desc = ds['ModeDescription'].values[0, m]
if np.isnan(ds['heights'].values[0, m, :]).all():
continue
range_data = ds['heights'].values[0, m, :]
else:
mode_desc = ds['ModeDescription'].values[m]
if np.isnan(ds['heights'].values[m, :]).all():
continue
range_data = ds['heights'].values[m, :]
mode_desc = str(mode_desc).split('_')[-1][0:-1]
mode_desc = str(mode_desc).split('\'')[0]
idx = np.where(ds['ModeNum'].values == m)[0]
idy = np.where(~np.isnan(range_data))[0]
for v in mode_vars:
new_var_name = v + '_' + mode_desc
time_name = 'time_' + mode_desc
range_name = 'range_' + mode_desc
data = ds[v].values[idx, :]
data = data[:, idy]
attrs = ds[v].attrs
da = xr.DataArray(
data=data,
coords={time_name: ds['time'].values[idx], range_name: range_data[idy]},
dims=[time_name, range_name],
attrs=attrs
)
ds[new_var_name] = da
return ds
<|code_end|>
act/qc/arm.py
<|code_start|>"""
Functions specifically for working with QC/DQRs from
the Atmospheric Radiation Measurement Program (ARM).
"""
import datetime as dt
import numpy as np
import requests
from act.config import DEFAULT_DATASTREAM_NAME
def add_dqr_to_qc(
ds,
variable=None,
assessment='incorrect,suspect',
exclude=None,
include=None,
normalize_assessment=True,
cleanup_qc=True,
dqr_link=False,
):
"""
Function to query the ARM DQR web service for reports and
    add them as a new quality control test to the ancillary quality control
    variable. If no ancillary quality control variable exists, a new
    one will be created and linked to the data variable through the
    ancillary_variables attribute.
See online documentation from ARM Data
Quality Office on the use of the DQR web service.
https://code.arm.gov/docs/dqrws-examples/wikis/home
    Information about the DQR web-service available at
https://adc.arm.gov/dqrws/
Parameters
----------
ds : xarray.Dataset
Xarray dataset
variable : string, or list of str, or None
Variables to check DQR web service. If set to None will
attempt to update all variables.
assessment : string
assessment type to get DQRs. Current options include
'missing', 'suspect', 'incorrect' or any combination separated
by a comma.
exclude : list of strings
DQR IDs to exclude from adding into QC
include : list of strings
List of DQR IDs to include in flagging of data. Any other DQR IDs
will be ignored.
normalize_assessment : boolean
The DQR assessment term is different than the embedded QC
term. Embedded QC uses "Bad" and "Indeterminate" while
DQRs use "Incorrect" and "Suspect". Setting this will ensure
the same terms are used for both.
cleanup_qc : boolean
Call clean.cleanup() method to convert to standardized ancillary
quality control variables. Has a little bit of overhead so
if the Dataset has already been cleaned up, no need to run.
dqr_link : boolean
Prints out a link for each DQR to read the full DQR. Defaults to False
Returns
-------
ds : xarray.Dataset
Xarray dataset containing new quality control variables
Examples
--------
.. code-block:: python
from act.qc.arm import add_dqr_to_qc
ds = add_dqr_to_qc(ds, variable=['temp_mean', 'atmos_pressure'])
"""
# DQR Webservice goes off datastreams, pull from the dataset
if 'datastream' in ds.attrs:
datastream = ds.attrs['datastream']
elif '_datastream' in ds.attrs:
datastream = ds.attrs['_datastream']
else:
raise ValueError('Dataset does not have datastream attribute')
if datastream == DEFAULT_DATASTREAM_NAME:
raise ValueError("'datastream' name required for DQR service set to default value "
f"{datastream}. Unable to perform DQR service query.")
# Clean up QC to conform to CF conventions
if cleanup_qc:
ds.clean.cleanup()
# In order to properly flag data, get all variables if None. Exclude QC variables.
if variable is None:
variable = list(set(ds.data_vars) - set(ds.clean.matched_qc_variables))
# Check to ensure variable is list
if not isinstance(variable, (list, tuple)):
variable = [variable]
# Loop through each variable and call web service for that variable
for var_name in variable:
# Create URL
url = 'http://www.archive.arm.gov/dqrws/ARMDQR?datastream='
url += datastream
url += '&varname=' + var_name
url += ''.join(
[
'&searchmetric=',
assessment,
'&dqrfields=dqrid,starttime,endtime,metric,subject',
]
)
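        # The assembled URL has the form (placeholders filled in at runtime):
        # http://www.archive.arm.gov/dqrws/ARMDQR?datastream=<datastream>
        #     &varname=<var_name>&searchmetric=<assessment>
        #     &dqrfields=dqrid,starttime,endtime,metric,subject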
# Call web service
req = requests.get(url)
# Check status values and raise error if not successful
status = req.status_code
if status == 400:
raise ValueError('Check parameters')
if status == 500:
raise ValueError('DQR Webservice Temporarily Down')
# Get data and run through each dqr
dqrs = req.text.splitlines()
time = ds['time'].values
dqr_results = {}
for line in dqrs:
line = line.split('|')
dqr_no = line[0]
# Exclude DQRs if in list
if exclude is not None and dqr_no in exclude:
continue
# Only include if in include list
if include is not None and dqr_no not in include:
continue
starttime = np.datetime64(dt.datetime.utcfromtimestamp(int(line[1])))
endtime = np.datetime64(dt.datetime.utcfromtimestamp(int(line[2])))
ind = np.where((time >= starttime) & (time <= endtime))
if ind[0].size == 0:
continue
if dqr_no in dqr_results.keys():
dqr_results[dqr_no]['index'] = np.append(dqr_results[dqr_no]['index'], ind)
else:
dqr_results[dqr_no] = {
'index': ind,
'test_assessment': line[3],
'test_meaning': ': '.join([dqr_no, line[-1]]),
}
if dqr_link:
print_url = 'https://adc.arm.gov/ArchiveServices/DQRService?dqrid=' + str(dqr_no)
print(dqr_no, '-', line[3], ':', print_url)
for key, value in dqr_results.items():
try:
ds.qcfilter.add_test(
var_name,
index=value['index'],
test_meaning=value['test_meaning'],
test_assessment=value['test_assessment'],
)
except IndexError:
print(f"Skipping '{var_name}' DQR application because of IndexError")
if normalize_assessment:
ds.clean.normalize_assessment(variables=var_name)
return ds
<|code_end|>
act/qc/qcfilter.py
<|code_start|>"""
Functions and methods for creating ancillary quality control variables
and filters (masks) which can be used with various corrections
routines in ACT.
"""
import dask
import numpy as np
import xarray as xr
from act.qc import comparison_tests, qctests, bsrn_tests
@xr.register_dataset_accessor('qcfilter')
class QCFilter(qctests.QCTests, comparison_tests.QCTests, bsrn_tests.QCTests):
"""
A class for building quality control variables containing arrays for
filtering data based on a set of test condition typically based on the
values in the data fields. These filters can be used in various
algorithms and calculations within ACT.
"""
def __init__(self, ds):
"""initialize"""
self._ds = ds
def check_for_ancillary_qc(
self,
var_name,
add_if_missing=True,
cleanup=False,
flag_type=False
):
"""
        Method to check if a quality control variable exists in the dataset
        and return the quality control variable name.
        Will call create_qc_variable() to make the variable if it does not exist
        and update_ancillary_variable() to ensure linkage between the data and
        quality control variable. Can also be used just to get the
        corresponding quality control variable name, optionally without adding
        one if it is missing.
Parameters
----------
var_name : str
Data variable name.
add_if_missing : boolean
Add a quality control variable if missing from the dataset. Will raise
an exception if var_name does not exist in the Dataset. Set to False
to not raise the exception.
cleanup : boolean
Option to run qc.clean.cleanup() method on the dataset
to ensure the dataset was updated from ARM QC to the
correct standardized QC.
flag_type : boolean
Indicating the QC variable uses flag_values instead of
flag_masks.
Returns
-------
qc_var_name : str or None
Name of existing or new quality control variable. Returns
None if no existing quality control variable is found and
add_if_missing is set to False.
Examples
--------
.. code-block:: python
from act.tests import EXAMPLE_METE40
from act.io.armfiles import read_netcdf
ds = read_netcdf(EXAMPLE_METE40, cleanup_qc=True)
qc_var_name = ds.qcfilter.check_for_ancillary_qc('atmos_pressure')
print(f'qc_var_name: {qc_var_name}')
qc_var_name = ds.qcfilter.check_for_ancillary_qc('the_greatest_variable_ever',
add_if_missing=False)
print(f'qc_var_name: {qc_var_name}')
"""
qc_var_name = None
try:
ancillary_variables = self._ds[var_name].attrs['ancillary_variables']
if isinstance(ancillary_variables, str):
ancillary_variables = ancillary_variables.split()
for var in ancillary_variables:
for attr, value in self._ds[var].attrs.items():
if attr == 'standard_name' and 'quality_flag' in value:
qc_var_name = var
if add_if_missing and qc_var_name is None:
qc_var_name = self._ds.qcfilter.create_qc_variable(var_name, flag_type=flag_type)
except KeyError:
# Since no ancillary_variables attribute exists, look for the ARM style QC
# variable name. If it exists use it, else create a new
# QC variable.
if add_if_missing:
try:
self._ds['qc_' + var_name]
qc_var_name = 'qc_' + var_name
except KeyError:
qc_var_name = self._ds.qcfilter.create_qc_variable(
var_name, flag_type=flag_type
)
# Make sure the data variable has a variable attribute linking
# the data variable to the QC variable.
if add_if_missing:
self._ds.qcfilter.update_ancillary_variable(var_name, qc_var_name)
# Clean up quality control variables to the required standard in the
# xarray dataset.
if cleanup:
self._ds.clean.cleanup(handle_missing_value=True, link_qc_variables=False)
return qc_var_name
def create_qc_variable(
self, var_name,
flag_type=False,
flag_values_set_value=0,
qc_var_name=None
):
"""
Method to create a quality control variable in the dataset.
Will try not to destroy the qc variable by appending numbers
to the variable name if needed.
Parameters
----------
var_name : str
Data variable name.
flag_type : boolean
If an integer flag type should be created instead of
bitpacked mask type. Will create flag_values instead of
flag_masks.
flag_values_set_value : int
Initial flag value to use when initializing array.
qc_var_name : str
Optional new quality control variable name. If not set
will create one using \\"qc\\_\\" prepended to the data
variable name. If the name given or created is taken
will append a number that does not have a conflict.
Returns
-------
qc_var_name : str
Name of new quality control variable created.
Examples
--------
.. code-block:: python
from act.tests import EXAMPLE_AOSMET
from act.io.armfiles import read_netcdf
ds = read_netcdf(EXAMPLE_AOSMET)
qc_var_name = ds.qcfilter.create_qc_variable('temperature_ambient')
print(qc_var_name)
print(ds[qc_var_name])
"""
# Make QC variable long name. The variable long_name attribute
# may not exist so catch that error and set to default.
try:
qc_variable_long_name = (
'Quality check results on field: ' + self._ds[var_name].attrs['long_name']
)
except KeyError:
qc_variable_long_name = 'Quality check results for ' + var_name
# Make a new quality control variable name. Check if exists in the
# dataset. If so loop through creation of new name until one is
# found that will not replace existing variable.
if qc_var_name is None:
qc_var_name = 'qc_' + var_name
variable_names = list(self._ds.data_vars)
if qc_var_name in variable_names:
for ii in range(1, 100):
temp_qc_var_name = '_'.join([qc_var_name, str(ii)])
if temp_qc_var_name not in variable_names:
qc_var_name = temp_qc_var_name
break
# Create the QC variable filled with 0 values matching the
# shape of data variable.
try:
qc_data = dask.array.from_array(
np.zeros_like(self._ds[var_name].values, dtype=np.int32),
chunks=self._ds[var_name].data.chunksize,
)
except AttributeError:
qc_data = np.zeros_like(self._ds[var_name].values, dtype=np.int32)
# Updating to use coords instead of dim, which caused a loss of
# attributes as noted in Issue 347
self._ds[qc_var_name] = xr.DataArray(
data=qc_data,
coords=self._ds[var_name].coords,
attrs={'long_name': qc_variable_long_name, 'units': '1'},
)
# Update if using flag_values and don't want 0 to be default value.
if flag_type and flag_values_set_value != 0:
self._ds[qc_var_name].values = self._ds[qc_var_name].values + int(
flag_values_set_value
)
# Add required variable attributes.
if flag_type:
self._ds[qc_var_name].attrs['flag_values'] = []
else:
self._ds[qc_var_name].attrs['flag_masks'] = []
self._ds[qc_var_name].attrs['flag_meanings'] = []
self._ds[qc_var_name].attrs['flag_assessments'] = []
self._ds[qc_var_name].attrs['standard_name'] = 'quality_flag'
self.update_ancillary_variable(var_name, qc_var_name=qc_var_name)
return qc_var_name
def update_ancillary_variable(self, var_name, qc_var_name=None):
"""
Method to check if ancillary_variables variable attribute
is set with quality control variable name.
Parameters
----------
var_name : str
Data variable name.
qc_var_name : str
quality control variable name. If not given will attempt
to get the name from data variable ancillary_variables
attribute.
Examples
--------
.. code-block:: python
from act.tests import EXAMPLE_AOSMET
from act.io.armfiles import read_netcdf
ds = read_netcdf(EXAMPLE_AOSMET)
var_name = 'temperature_ambient'
qc_var_name = ds.qcfilter.create_qc_variable(var_name)
del ds[var_name].attrs['ancillary_variables']
ds.qcfilter.update_ancillary_variable(var_name, qc_var_name)
print(ds[var_name].attrs['ancillary_variables'])
"""
if qc_var_name is None:
qc_var_name = self._ds.qcfilter.check_for_ancillary_qc(var_name, add_if_missing=False)
if qc_var_name is None:
return
try:
ancillary_variables = self._ds[var_name].attrs['ancillary_variables']
if qc_var_name not in ancillary_variables:
ancillary_variables = ' '.join([ancillary_variables, qc_var_name])
except KeyError:
ancillary_variables = qc_var_name
self._ds[var_name].attrs['ancillary_variables'] = ancillary_variables
def add_test(
self,
var_name,
index=None,
test_number=None,
test_meaning=None,
test_assessment='Bad',
flag_value=False,
recycle=False,
):
"""
Method to add a new test/filter to a quality control variable.
Parameters
----------
var_name : str
data variable name
index : int, bool, list of int or bool, numpy array, tuple of numpy arrays, None
Indexes into quality control array to set the test bit.
If not set or set to None will not set the test on any
element of the quality control variable but will still
add the test to the flag_masks, flag_meanings and
flag_assessments attributes.
test_number : int
Test number to use. If keyword is not set will use first
available test bit/test number.
test_meaning : str
String describing the test. Will be added to flag_meanings
variable attribute.
test_assessment : str
String describing the test assessment. If not set will use
"Bad" as the string to append to flag_assessments. Will
update to be capitalized.
flag_value : boolean
Switch to use flag_values integer quality control.
recycle : boolean
Option to use a number less than the next highest test if available. For example,
if tests 1, 2, 4, 5 are set and recycle is True, the next test chosen will be 3,
else it will be 6.
Returns
-------
test_dict : dict
A dictionary containing information added to the QC
variable.
Examples
--------
.. code-block:: python
result = ds.qcfilter.add_test(var_name, test_meaning='Birds!')
"""
test_dict = {}
if test_meaning is None:
raise ValueError(
'You need to provide a value for test_meaning '
'keyword when calling the add_test method'
)
# This ensures the indexing will work even if given float values.
# Preserves tuples from np.where() or boolean arrays for standard
# python indexing.
if index is not None and not isinstance(index, (np.ndarray, tuple)):
index = np.array(index)
if index.dtype.kind not in np.typecodes['AllInteger']:
index = index.astype(int)
# Ensure assessment is capitalized to be consistent
test_assessment = test_assessment.capitalize()
qc_var_name = self._ds.qcfilter.check_for_ancillary_qc(var_name, flag_type=flag_value)
if test_number is None:
test_number = self._ds.qcfilter.available_bit(qc_var_name, recycle=recycle)
self._ds.qcfilter.set_test(var_name, index, test_number, flag_value)
if flag_value:
try:
self._ds[qc_var_name].attrs['flag_values'].append(test_number)
except KeyError:
self._ds[qc_var_name].attrs['flag_values'] = [test_number]
else:
# Determine if flag_masks test number is too large for current data type.
# If so up convert data type.
flag_masks = np.array(self._ds[qc_var_name].attrs['flag_masks'])
mask_dtype = flag_masks.dtype
if not np.issubdtype(mask_dtype, np.integer):
mask_dtype = np.uint32
if np.iinfo(mask_dtype).max - set_bit(0, test_number) <= -1:
if mask_dtype == np.int8 or mask_dtype == np.uint8:
mask_dtype = np.uint16
elif mask_dtype == np.int16 or mask_dtype == np.uint16:
mask_dtype = np.uint32
elif mask_dtype == np.int32 or mask_dtype == np.uint32:
mask_dtype = np.uint64
flag_masks = flag_masks.astype(mask_dtype)
flag_masks = np.append(flag_masks, np.array(set_bit(0, test_number), dtype=mask_dtype))
self._ds[qc_var_name].attrs['flag_masks'] = list(flag_masks)
try:
self._ds[qc_var_name].attrs['flag_meanings'].append(test_meaning)
except KeyError:
self._ds[qc_var_name].attrs['flag_meanings'] = [test_meaning]
try:
self._ds[qc_var_name].attrs['flag_assessments'].append(test_assessment)
except KeyError:
self._ds[qc_var_name].attrs['flag_assessments'] = [test_assessment]
test_dict['test_number'] = test_number
test_dict['test_meaning'] = test_meaning
test_dict['test_assessment'] = test_assessment
test_dict['qc_variable_name'] = qc_var_name
test_dict['variable_name'] = var_name
return test_dict
def remove_test(
self,
var_name=None,
test_number=None,
qc_var_name=None,
flag_value=False,
flag_values_reset_value=0,
):
"""
Method to remove a test/filter from a quality control variable. Must set
var_name or qc_var_name.
Parameters
----------
var_name : str or None
Data variable name.
test_number : int
Test number to remove.
qc_var_name : str or None
Quality control variable name. Ignored if var_name is set.
flag_value : boolean
Switch to use flag_values integer quality control.
flag_values_reset_value : int
Value to use when resetting a flag_values value to not be set.
Examples
--------
.. code-block:: python
ds.qcfilter.remove_test(var_name, test_number=3)
"""
if test_number is None:
raise ValueError(
'You need to provide a value for test_number '
'keyword when calling the remove_test() method'
)
if var_name is None and qc_var_name is None:
raise ValueError(
'You need to provide a value for var_name or qc_var_name '
'keyword when calling the remove_test() method'
)
if var_name is not None:
qc_var_name = self._ds.qcfilter.check_for_ancillary_qc(var_name)
# Determine which index is using the test number
index = None
if flag_value:
flag_values = self._ds[qc_var_name].attrs['flag_values']
for ii, flag_num in enumerate(flag_values):
if flag_num == test_number:
index = ii
break
else:
flag_masks = self._ds[qc_var_name].attrs['flag_masks']
for ii, bit_num in enumerate(flag_masks):
if parse_bit(bit_num)[0] == test_number:
index = ii
break
# If can't find the index of test return before doing anything.
if index is None:
return
if flag_value:
remove_index = self._ds.qcfilter.get_qc_test_mask(
var_name=var_name,
qc_var_name=qc_var_name,
test_number=test_number,
return_index=True,
flag_value=True,
)
self._ds.qcfilter.unset_test(
var_name=var_name,
qc_var_name=qc_var_name,
index=remove_index,
test_number=test_number,
flag_value=flag_value,
flag_values_reset_value=flag_values_reset_value,
)
del flag_values[index]
self._ds[qc_var_name].attrs['flag_values'] = flag_values
else:
remove_index = self._ds.qcfilter.get_qc_test_mask(
var_name=var_name,
qc_var_name=qc_var_name,
test_number=test_number,
return_index=True,
)
self._ds.qcfilter.unset_test(
var_name=var_name,
qc_var_name=qc_var_name,
index=remove_index,
test_number=test_number,
flag_value=flag_value,
)
if isinstance(flag_masks, list):
del flag_masks[index]
else:
flag_masks = np.delete(flag_masks, index)
self._ds[qc_var_name].attrs['flag_masks'] = flag_masks
flag_meanings = self._ds[qc_var_name].attrs['flag_meanings']
del flag_meanings[index]
self._ds[qc_var_name].attrs['flag_meanings'] = flag_meanings
flag_assessments = self._ds[qc_var_name].attrs['flag_assessments']
del flag_assessments[index]
self._ds[qc_var_name].attrs['flag_assessments'] = flag_assessments
def set_test(self, var_name, index=None, test_number=None, flag_value=False):
"""
Method to set a test/filter in a quality control variable.
Parameters
----------
var_name : str
Data variable name.
index : int or list or numpy array
Index to set test in quality control array. If want to
unset all values will need to pass in index of all values.
test_number : int
Test number to set.
flag_value : boolean
Switch to use flag_values integer quality control.
Examples
--------
.. code-block:: python
index = [0, 1, 2, 30]
ds.qcfilter.set_test(var_name, index=index, test_number=2)
"""
qc_var_name = self._ds.qcfilter.check_for_ancillary_qc(var_name)
qc_variable = np.array(self._ds[qc_var_name].values)
# Ensure the qc_variable data type is integer. This ensures bitwise comparison
# will not cause an error.
if qc_variable.dtype.kind not in np.typecodes['AllInteger']:
qc_variable = qc_variable.astype(int)
# Determine if test number is too large for current data type. If so
# up convert data type.
dtype = qc_variable.dtype
if np.iinfo(dtype).max - set_bit(0, test_number) < -1:
if dtype == np.int8:
dtype = np.int16
elif dtype == np.int16:
dtype = np.int32
elif dtype == np.int32:
dtype = np.int64
qc_variable = qc_variable.astype(dtype)
if index is not None:
if flag_value:
qc_variable[index] = test_number
else:
qc_variable[index] = set_bit(qc_variable[index], test_number)
self._ds[qc_var_name].values = qc_variable
def unset_test(
self,
var_name=None,
qc_var_name=None,
index=None,
test_number=None,
flag_value=False,
flag_values_reset_value=0,
):
"""
Method to unset a test/filter from a quality control variable.
Parameters
----------
var_name : str or None
Data variable name.
qc_var_name : str or None
Quality control variable name. Ignored if var_name is set.
index : int or list or numpy array
Index to unset test in quality control array. If want to
unset all values will need to pass in index of all values.
test_number : int
Test number to remove.
flag_value : boolean
Switch to use flag_values integer quality control.
flag_values_reset_value : int
Value to use when resetting a flag_values value to not be set.
Examples
--------
.. code-block:: python
ds.qcfilter.unset_test(var_name, index=range(10, 100), test_number=2)
"""
if index is None:
return
if var_name is None and qc_var_name is None:
raise ValueError(
'You need to provide a value for var_name or qc_var_name '
'keyword when calling the unset_test() method'
)
if var_name is not None:
qc_var_name = self._ds.qcfilter.check_for_ancillary_qc(var_name)
# Get QC variable
qc_variable = self._ds[qc_var_name].values
# Ensure the qc_variable data type is integer. This ensures bitwise comparison
# will not cause an error.
if qc_variable.dtype.kind not in np.typecodes['AllInteger']:
qc_variable = qc_variable.astype(int)
if flag_value:
qc_variable[index] = flag_values_reset_value
else:
qc_variable[index] = unset_bit(qc_variable[index], test_number)
self._ds[qc_var_name].values = qc_variable
def available_bit(self, qc_var_name, recycle=False):
"""
Method to determine next available bit or flag to use with a QC test.
Will check for flag_masks first and if not found will check for
flag_values. This will drive how the next value is chosen.
Parameters
----------
qc_var_name : str
Quality control variable name.
recycle : boolean
Option to look for a bit (test) not in use starting from 1.
If a test is not defined will return the lowest number, else
will just use next highest number.
Returns
-------
test_num : int
Next available test number.
Examples
--------
.. code-block:: python
from act.tests import EXAMPLE_METE40
from act.io.armfiles import read_netcdf
ds = read_netcdf(EXAMPLE_METE40, cleanup_qc=True)
test_number = ds.qcfilter.available_bit('qc_atmos_pressure')
print(test_number)
"""
try:
flag_masks = self._ds[qc_var_name].attrs['flag_masks']
flag_value = False
except KeyError:
try:
flag_masks = self._ds[qc_var_name].attrs['flag_values']
flag_value = True
except KeyError:
try:
self._ds[qc_var_name].attrs['flag_values']
flag_masks = self._ds[qc_var_name].attrs['flag_masks']
flag_value = False
except KeyError:
raise ValueError(
'Problem getting next value from '
'available_bit(). flag_values and '
'flag_masks not set as expected'
)
if flag_masks == []:
next_bit = 1
else:
if flag_value:
if recycle:
next_bit = min(set(range(1, 100000)) - set(flag_masks))
else:
next_bit = max(flag_masks) + 1
else:
if recycle:
tests = [parse_bit(mask)[0] for mask in flag_masks]
next_bit = min(set(range(1, 63)) - set(tests))
else:
next_bit = parse_bit(max(flag_masks))[0] + 1
return int(next_bit)
def get_qc_test_mask(
self,
var_name=None,
test_number=None,
qc_var_name=None,
flag_value=False,
return_index=False,
):
"""
Returns a numpy array of False or True where a particular
flag or bit is set in a numpy array. Must set var_name or qc_var_name
when calling.
Parameters
----------
var_name : str or None
Data variable name.
test_number : int
Test number to return array where test is set.
qc_var_name : str or None
Quality control variable name. Ignored if var_name is set.
flag_value : boolean
Switch to use flag_values integer quality control.
return_index : boolean
Return a numpy array of index numbers into QC array where the
test is set instead of False or True mask.
Returns
-------
test_mask : numpy bool array or numpy integer array
A numpy boolean array with False or True where the test number or
bit was set, or numpy integer array of indexes where test is True.
Examples
--------
.. code-block:: python
from act.io.armfiles import read_netcdf
from act.tests import EXAMPLE_IRT25m20s
ds = read_netcdf(EXAMPLE_IRT25m20s)
var_name = "inst_up_long_dome_resist"
result = ds.qcfilter.add_test(
var_name, index=[0, 1, 2], test_meaning="Birds!"
)
qc_var_name = result["qc_variable_name"]
mask = ds.qcfilter.get_qc_test_mask(
var_name, result["test_number"], return_index=True
)
print(mask)
array([0, 1, 2])
mask = ds.qcfilter.get_qc_test_mask(var_name, result["test_number"])
print(mask)
array([True, True, True, ..., False, False, False])
data = ds[var_name].values
print(data[mask])
array([7.84, 7.8777, 7.8965], dtype=float32)
import numpy as np
data[mask] = np.nan
print(data)
array([nan, nan, nan, ..., 7.6705, 7.6892, 7.6892], dtype=float32)
"""
if var_name is None and qc_var_name is None:
raise ValueError(
'You need to provide a value for var_name or qc_var_name '
'keyword when calling the get_qc_test_mask() method'
)
if test_number is None:
raise ValueError(
'You need to provide a value for test_number '
'keyword when calling the get_qc_test_mask() method'
)
if var_name is not None:
qc_var_name = self._ds.qcfilter.check_for_ancillary_qc(var_name)
qc_variable = self._ds[qc_var_name].values
# Ensure the qc_variable data type is integer. This ensures bitwise comparison
# will not cause an error.
if qc_variable.dtype.kind not in np.typecodes['AllInteger']:
qc_variable = qc_variable.astype(int)
if flag_value:
tripped = qc_variable == test_number
else:
check_bit = set_bit(0, test_number) & qc_variable
tripped = check_bit > 0
test_mask = np.full(qc_variable.shape, False, dtype='bool')
# Make sure test_mask is at least a 1-D array. If qc_variable is a scalar
# the mask created above would be returned as a scalar.
test_mask = np.atleast_1d(test_mask)
test_mask[tripped] = True
test_mask = np.ma.make_mask(test_mask, shrink=False)
if return_index:
test_mask = np.where(test_mask)[0]
return test_mask
def get_masked_data(
self,
var_name,
rm_assessments=None,
rm_tests=None,
return_nan_array=False,
ma_fill_value=None,
return_inverse=False,
):
"""
Returns a numpy masked array containing data and mask or
a numpy float array with masked values set to NaN.
Parameters
----------
var_name : str
Data variable name.
rm_assessments : str or list of str
Assessment name to exclude from returned data.
rm_tests : int or list of int
Test numbers to exclude from returned data. This is the test
number (or bit position number) not the mask number.
return_nan_array : boolean
Return a numpy array with filtered ( or masked) values
set to numpy NaN value. If the data is type int will upconvert
to numpy float to allow setting NaN value.
ma_fill_value : int or float (or str?)
The numpy masked array fill_value used in creation of the
masked array. If the datatype needs to be upconverted to allow
the fill value to be used, data will be upconverted.
return_inverse : boolean
Invert the masked array mask so the data that is failing the tests is
returned instead of the data that is passing. Useful for overplotting
the locations where the data is failing.
Returns
-------
variable : numpy masked array or numpy float array
Default is to return a numpy masked array with the mask set to
True where the test with requested assessment or test number
was found set.
If return_nan_array is True will return numpy array upconverted
to float with locations where the test with requested assessment
or test number was found set converted to NaN.
Examples
--------
.. code-block:: python
from act.io.armfiles import read_netcdf
from act.tests import EXAMPLE_IRT25m20s
ds = read_netcdf(EXAMPLE_IRT25m20s)
var_name = "inst_up_long_dome_resist"
result = ds.qcfilter.add_test(
var_name, index=[0, 1, 2], test_meaning="Birds!"
)
data = ds.qcfilter.get_masked_data(
var_name, rm_assessments=["Bad", "Indeterminate"]
)
print(data)
masked_array(
data=[..., 7.670499801635742, 7.689199924468994, 7.689199924468994],
mask=[..., False, False, False],
fill_value=1e20,
dtype=float32,
)
"""
qc_var_name = self._ds.qcfilter.check_for_ancillary_qc(var_name, add_if_missing=False)
flag_value = False
flag_values = None
flag_masks = None
flag_assessments = None
try:
flag_assessments = self._ds[qc_var_name].attrs['flag_assessments']
flag_masks = self._ds[qc_var_name].attrs['flag_masks']
except KeyError:
pass
try:
flag_values = self._ds[qc_var_name].attrs['flag_values']
flag_value = True
except KeyError:
pass
test_numbers = []
if rm_tests is not None:
if isinstance(rm_tests, (int, float, str)):
rm_tests = [int(rm_tests)]
test_numbers.extend(rm_tests)
if rm_assessments is not None:
if isinstance(rm_assessments, str):
rm_assessments = [rm_assessments]
if flag_masks is not None:
test_nums = [parse_bit(mask)[0] for mask in flag_masks]
if flag_values is not None:
test_nums = flag_values
rm_assessments = [x.lower() for x in rm_assessments]
if flag_assessments is not None:
for ii, assessment in enumerate(flag_assessments):
if assessment.lower() in rm_assessments:
test_numbers.append(test_nums[ii])
# Make the list of test numbers to mask unique
test_numbers = list(set(test_numbers))
# Create mask of indexes by looking where each test is set
variable = self._ds[var_name].values
nan_dtype = np.float32
if variable.dtype in (np.float64, np.int64):
nan_dtype = np.float64
mask = np.zeros(variable.shape, dtype=bool)
for test in test_numbers:
mask = mask | self._ds.qcfilter.get_qc_test_mask(var_name, test, flag_value=flag_value)
# Convert data numpy array into masked array
try:
variable = np.ma.array(variable, mask=mask, fill_value=ma_fill_value)
except TypeError:
variable = np.ma.array(
variable,
mask=mask,
fill_value=ma_fill_value,
dtype=np.array(ma_fill_value).dtype,
)
# If requested, switch the array from where data is not failing tests
# to where data is failing tests. This can be used when overplotting
# where the data is failing the tests.
if return_inverse:
mask = variable.mask
mask = np.invert(mask)
variable.mask = mask
# If asked to return numpy array with values set to NaN
if return_nan_array:
variable = variable.astype(nan_dtype)
variable = variable.filled(fill_value=np.nan)
return variable
def datafilter(
self,
variables=None,
rm_assessments=None,
rm_tests=None,
verbose=False,
del_qc_var=True,
):
"""
Method to apply quality control variables to data variables by
changing the data values in the dataset using quality control variables.
The data is updated with failing data set to
NaN. This can be used to update the data variable in the xarray
dataset for use with xarray methods to perform analysis on the data
since those methods don't read the quality control variables.
Parameters
----------
variables : None or str or list of str
Data variable names to process. If set to None will update all
data variables.
rm_assessments : str or list of str
Assessment names listed under quality control variable flag_assessments
to exclude from returned data. Examples include
['Bad', 'Incorrect', 'Indeterminate', 'Suspect']
rm_tests : int or list of int
Test numbers listed under quality control variable to exclude from
returned data. This is the test
number (or bit position number) not the mask number.
verbose : boolean
Print processing information.
del_qc_var : boolean
Option to delete quality control variable after processing. Since
the data values can not be determined after they are set to NaN
and xarray method processing would also process the quality control
variables, the default is to remove the quality control data
variables.
Examples
--------
.. code-block:: python
from act.io.armfiles import read_netcdf
from act.tests import EXAMPLE_MET1
ds = read_netcdf(EXAMPLE_MET1)
ds.clean.cleanup()
var_name = "atmos_pressure"
ds_1 = ds.nanmean()
ds.qcfilter.add_less_test(var_name, 99, test_assessment="Bad")
ds.qcfilter.datafilter(rm_assessments="Bad")
ds_2 = ds.nanmean()
print("All_data =", ds_1[var_name].values)
All_data = 98.86098
print("Bad_Removed =", ds_2[var_name].values)
Bad_Removed = 99.15148
"""
if rm_assessments is None and rm_tests is None:
raise ValueError('Need to set rm_assessments or rm_tests option')
if variables is not None and isinstance(variables, str):
variables = [variables]
if variables is None:
variables = list(self._ds.data_vars)
for var_name in variables:
qc_var_name = self.check_for_ancillary_qc(var_name, add_if_missing=False, cleanup=False)
if qc_var_name is None:
if verbose:
if var_name in ['base_time', 'time_offset']:
continue
try:
if self._ds[var_name].attrs['standard_name'] == 'quality_flag':
continue
except KeyError:
pass
print(f'No quality control variable for {var_name} found '
f'in call to .qcfilter.datafilter()')
continue
# Need to return data as Numpy array with NaN values. Setting the Dask array
# to Numpy masked array does not work with other tools.
data = self.get_masked_data(
var_name,
rm_assessments=rm_assessments,
rm_tests=rm_tests,
return_nan_array=True
)
# If data was originally stored as a Dask array, return values to the Dataset as a Dask array
# else set as Numpy array.
try:
self._ds[var_name].data = dask.array.from_array(
data, chunks=self._ds[var_name].data.chunksize)
except AttributeError:
self._ds[var_name].values = data
# Adding information on filtering to history attribute
flag_masks = None
flag_assessments = None
flag_meanings = None
try:
flag_assessments = list(self._ds[qc_var_name].attrs['flag_assessments'])
flag_masks = list(self._ds[qc_var_name].attrs['flag_masks'])
flag_meanings = list(self._ds[qc_var_name].attrs['flag_meanings'])
except KeyError:
pass
# Add comment to history for each test that's filtered out
if isinstance(rm_tests, int):
rm_tests = [rm_tests]
if rm_tests is not None:
for test in list(rm_tests):
if test in flag_masks:
index = flag_masks.index(test)
comment = ''.join(['act.qc.datafilter: ', flag_meanings[index]])
if 'history' in self._ds[var_name].attrs.keys():
self._ds[var_name].attrs['history'] += '\n' + comment
else:
self._ds[var_name].attrs['history'] = comment
if isinstance(rm_assessments, str):
rm_assessments = [rm_assessments]
if rm_assessments is not None:
for assessment in rm_assessments:
if assessment in flag_assessments:
index = [i for i, e in enumerate(flag_assessments) if e == assessment]
for ind in index:
comment = ''.join(['act.qc.datafilter: ', flag_meanings[ind]])
if 'history' in self._ds[var_name].attrs.keys():
self._ds[var_name].attrs['history'] += '\n' + comment
else:
self._ds[var_name].attrs['history'] = comment
# If requested delete quality control variable
if del_qc_var:
del self._ds[qc_var_name]
if verbose:
print(f'Deleting {qc_var_name} from dataset')
def set_bit(array, bit_number):
"""
Function to set a quality control bit given a scalar or
array of values and a bit number.
Parameters
----------
array : int list of int or numpy array of int
The bitpacked array to set the bit number.
bit_number : int
The bit (or test) number to set starting at 1.
Returns
-------
array : int, numpy array, tuple, list
Integer or numpy array with bit set for each element of the array.
Returned in same type.
Examples
--------
Example use setting bit 2 to an array called data:
.. code-block:: python
from act.qc.qcfilter import set_bit
data = np.array(range(0, 7))
data = set_bit(data, 2)
print(data)
array([2, 3, 2, 3, 6, 7, 6])
"""
was_list = False
was_tuple = False
if isinstance(array, list):
array = np.array(array)
was_list = True
if isinstance(array, tuple):
array = np.array(array)
was_tuple = True
if bit_number > 0:
array |= 1 << bit_number - 1
if was_list:
array = list(array)
if was_tuple:
array = tuple(array)
return array
def unset_bit(array, bit_number):
"""
Function to remove a quality control bit given a
scalar or array of values and a bit number.
Parameters
----------
array : int list of int or numpy array
Array of integers containing bit packed numbers.
bit_number : int
Bit number to remove starting at 1.
Returns
-------
array : int or numpy array
Returns same data type as array entered with bit removed. Will
fail gracefully if the bit requested to be removed was not set.
Examples
--------
.. code-block:: python
from act.qc.qcfilter import set_bit, unset_bit
data = set_bit([0, 1, 2, 3, 4], 2)
data = set_bit(data, 3)
print(data)
[6, 7, 6, 7, 6]
data = unset_bit(data, 2)
print(data)
[4, 5, 4, 5, 4]
"""
was_list = False
was_tuple = False
if isinstance(array, list):
array = np.array(array)
was_list = True
if isinstance(array, tuple):
array = np.array(array)
was_tuple = True
if bit_number > 0:
array &= ~(1 << bit_number - 1)
if was_list:
array = list(array)
if was_tuple:
array = tuple(array)
return array
def parse_bit(qc_bit):
"""
Given a single integer value, return bit positions.
Parameters
----------
qc_bit : int or numpy int
Bit packed integer number to be parsed.
Returns
-------
bit_number : numpy.int32 array
Array containing all bit numbers of the bit packed number.
If no bits set returns empty array.
Examples
--------
.. code-block:: python
from act.qc.qcfilter import parse_bit
parse_bit(7)
array([1, 2, 3], dtype=int32)
"""
if isinstance(qc_bit, (list, tuple, np.ndarray)):
if len(qc_bit) > 1:
raise ValueError('Must be a single value.')
qc_bit = qc_bit[0]
if qc_bit < 0:
raise ValueError('Must be a positive integer.')
# Convert integer value to single element numpy array of type unsigned integer 64
value = np.array([qc_bit]).astype(">u8")
# Convert value to view containing only unsigned integer 8 data type. This
# is required for the numpy unpackbits function which only works with
# unsigned integer 8 bit data type.
value = value.view("u1")
# Unpack bits using numpy into array of 1 where bit is set and convert into boolean array
index = np.unpackbits(value).astype(bool)
# Create range of numbers from 64 to 1 and subset where unpackbits found a bit set.
bit_number = np.arange(index.size, 0, -1)[index]
# Flip the array to increasing numbers to match historical method
bit_number = np.flip(bit_number)
# bit_number = []
# qc_bit = int(qc_bit)
# counter = 0
# while qc_bit > 0:
# temp_value = qc_bit % 2
# qc_bit = qc_bit >> 1
# counter += 1
# if temp_value == 1:
# bit_number.append(counter)
# Convert data type into expected type
bit_number = np.asarray(bit_number, dtype=np.int32)
return bit_number
<|code_end|>
|
Handling Incorrect ARM DQRs when applied with ACT function
We have implemented a function to query the ARM Data Quality Report (DQR) database and return the time periods when data is flagged. There are three levels of flagging within the DQRs, two of which can replace the variable values with NaN. ARM has a lot of DQRs where the list of variables is not accurate and overlists the affected variables.
For example, a DQR may indicate the instrument is malfunctioning and select all variables in the netCDF file. While the data is incorrect, the location, time, and QC variables are correct. Currently we loop over the variables and apply the DQR to each variable listed in the DQR. Some of the listed variables are scalars, which causes an exception that we catch and handle. The exception handler prints a message indicating the scalar variable could not be indexed.
We should discuss a better way to handle these variables, as the print statement may confuse users when there is nothing wrong with the scalar variables. Options include:
1) Correctly handling the replacement of data values with NaN for the scalar variables
2) Setting a keyword to enact the print statement, but set to False as default
3) Skip over these most likely incorrectly listed variables (a rough sketch of this approach is included after the example output below).
An example of the output for sgpaosccn2colaE13.b1 on 20170903 using D170905.3:
```
Skipping 'eta_lookup_table' DQR application because of IndexError
Skipping 'base_time' DQR application because of IndexError
Skipping 'alt' DQR application because of IndexError
Skipping 'droplet_size_bounds' DQR application because of IndexError
Skipping 'lon' DQR application because of IndexError
Skipping 'lat' DQR application because of IndexError
```
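A rough, hypothetical sketch of option 3 is below. The helper name and the `skip_location_vars` keyword are placeholders rather than existing ACT API; it assumes the problem variables can be recognized either by name (location variables) or by the lack of a time dimension (scalars).
```python
import xarray as xr

# Hypothetical helper illustrating option 3; names are placeholders, not ACT API.
def dqr_applicable_variables(ds: xr.Dataset, variables, skip_location_vars=True):
    """Return only the variables a time-based DQR index can safely be applied to."""
    loc_vars = {'lat', 'lon', 'alt', 'latitude', 'longitude', 'altitude'}
    keep = []
    for var_name in variables:
        # Location variables are usually listed by mistake in these DQRs.
        if skip_location_vars and var_name in loc_vars:
            continue
        # Scalar variables (no time dimension) are what trigger the IndexError,
        # so they are skipped instead of printing a warning for each one.
        if 'time' not in ds[var_name].dims:
            continue
        keep.append(var_name)
    return keep
```
The list returned by such a helper would replace the `variable` list used in the existing loop, so no message would be printed for variables that can never be indexed by time.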
|
act/qc/arm.py
<|code_start|>"""
Functions specifically for working with QC/DQRs from
the Atmospheric Radiation Measurement Program (ARM).
"""
import datetime as dt
import numpy as np
import requests
from act.config import DEFAULT_DATASTREAM_NAME
def add_dqr_to_qc(
ds,
variable=None,
assessment='incorrect,suspect',
exclude=None,
include=None,
normalize_assessment=True,
cleanup_qc=True,
dqr_link=False,
):
"""
Function to query the ARM DQR web service for reports and
add them as a new quality control test to the ancillary quality control
variable. If no ancillary quality control variable exists, a new
one will be created and linked to the data variable through the
ancillary_variables attribute.
See online documentation from ARM Data
Quality Office on the use of the DQR web service.
https://code.arm.gov/docs/dqrws-examples/wikis/home
Information about the DQR web-service available at
https://adc.arm.gov/dqrws/
Parameters
----------
ds : xarray.Dataset
Xarray dataset
variable : string, or list of str, or None
Variables to check DQR web service. If set to None will
attempt to update all variables.
assessment : string
assessment type to get DQRs. Current options include
'missing', 'suspect', 'incorrect' or any combination separated
by a comma.
exclude : list of strings
DQR IDs to exclude from adding into QC
include : list of strings
List of DQR IDs to include in flagging of data. Any other DQR IDs
will be ignored.
normalize_assessment : boolean
The DQR assessment term is different than the embedded QC
term. Embedded QC uses "Bad" and "Indeterminate" while
DQRs use "Incorrect" and "Suspect". Setting this will ensure
the same terms are used for both.
cleanup_qc : boolean
Call clean.cleanup() method to convert to standardized ancillary
quality control variables. Has a little bit of overhead so
if the Dataset has already been cleaned up, no need to run.
dqr_link : boolean
Prints out a link for each DQR to read the full DQR. Defaults to False
Returns
-------
ds : xarray.Dataset
Xarray dataset containing new quality control variables
Examples
--------
.. code-block:: python
from act.qc.arm import add_dqr_to_qc
ds = add_dqr_to_qc(ds, variable=['temp_mean', 'atmos_pressure'])
"""
# DQR Webservice goes off datastreams, pull from the dataset
if 'datastream' in ds.attrs:
datastream = ds.attrs['datastream']
elif '_datastream' in ds.attrs:
datastream = ds.attrs['_datastream']
else:
raise ValueError('Dataset does not have datastream attribute')
if datastream == DEFAULT_DATASTREAM_NAME:
raise ValueError("'datastream' name required for DQR service set to default value "
f"{datastream}. Unable to perform DQR service query.")
# Clean up QC to conform to CF conventions
if cleanup_qc:
ds.clean.cleanup()
# In order to properly flag data, get all variables if None. Exclude QC variables.
if variable is None:
variable = list(set(ds.data_vars) - set(ds.clean.matched_qc_variables))
# Check to ensure variable is list
if not isinstance(variable, (list, tuple)):
variable = [variable]
# Loop through each variable and call web service for that variable
for var_name in variable:
# Create URL
url = 'http://www.archive.arm.gov/dqrws/ARMDQR?datastream='
url += datastream
url += '&varname=' + var_name
url += ''.join(
[
'&searchmetric=',
assessment,
'&dqrfields=dqrid,starttime,endtime,metric,subject',
]
)
# Call web service
req = requests.get(url)
# Check status values and raise error if not successful
status = req.status_code
if status == 400:
raise ValueError('Check parameters')
if status == 500:
raise ValueError('DQR Webservice Temporarily Down')
# Get data and run through each dqr
dqrs = req.text.splitlines()
time = ds['time'].values
dqr_results = {}
for line in dqrs:
line = line.split('|')
dqr_no = line[0]
# Exclude DQRs if in list
if exclude is not None and dqr_no in exclude:
continue
# Only include if in include list
if include is not None and dqr_no not in include:
continue
starttime = np.datetime64(dt.datetime.utcfromtimestamp(int(line[1])))
endtime = np.datetime64(dt.datetime.utcfromtimestamp(int(line[2])))
ind = np.where((time >= starttime) & (time <= endtime))
if ind[0].size == 0:
continue
if dqr_no in dqr_results.keys():
dqr_results[dqr_no]['index'] = np.append(dqr_results[dqr_no]['index'], ind)
else:
dqr_results[dqr_no] = {
'index': ind,
'test_assessment': line[3],
'test_meaning': ': '.join([dqr_no, line[-1]]),
}
if dqr_link:
print_url = 'https://adc.arm.gov/ArchiveServices/DQRService?dqrid=' + str(dqr_no)
print(dqr_no, '-', line[3], ':', print_url)
for key, value in dqr_results.items():
try:
ds.qcfilter.add_test(
var_name,
index=value['index'],
test_meaning=value['test_meaning'],
test_assessment=value['test_assessment'],
)
except IndexError:
print(f"Skipping '{var_name}' DQR application because of IndexError")
if normalize_assessment:
ds.clean.normalize_assessment(variables=var_name)
return ds
<|code_end|>
|
act/qc/arm.py
<|code_start|>"""
Functions specifically for working with QC/DQRs from
the Atmospheric Radiation Measurement Program (ARM).
"""
import datetime as dt
import numpy as np
import requests
from act.config import DEFAULT_DATASTREAM_NAME
def add_dqr_to_qc(
ds,
variable=None,
assessment='incorrect,suspect',
exclude=None,
include=None,
normalize_assessment=True,
cleanup_qc=True,
dqr_link=False,
skip_location_vars=False,
):
"""
Function to query the ARM DQR web service for reports and
add them as a new quality control test to the ancillary quality control
variable. If no ancillary quality control variable exists, a new
one will be created and linked to the data variable through the
ancillary_variables attribute.
See online documentation from ARM Data
Quality Office on the use of the DQR web service.
https://code.arm.gov/docs/dqrws-examples/wikis/home
Information about the DQR web-service available at
https://adc.arm.gov/dqrws/
Parameters
----------
ds : xarray.Dataset
Xarray dataset
variable : string, or list of str, or None
Variables to check DQR web service. If set to None will
attempt to update all variables.
assessment : string
assessment type to get DQRs. Current options include
'missing', 'suspect', 'incorrect' or any combination separated
by a comma.
exclude : list of strings
DQR IDs to exclude from adding into QC
include : list of strings
List of DQR IDs to include in flagging of data. Any other DQR IDs
will be ignored.
normalize_assessment : boolean
The DQR assessment term is different than the embedded QC
term. Embedded QC uses "Bad" and "Indeterminate" while
DQRs use "Incorrect" and "Suspect". Setting this will ensure
the same terms are used for both.
cleanup_qc : boolean
Call clean.cleanup() method to convert to standardized ancillary
quality control variables. Has a little bit of overhead so
if the Dataset has already been cleaned up, no need to run.
dqr_link : boolean
Prints out a link for each DQR to read the full DQR. Defaults to False
skip_location_vars : boolean
Does not apply DQRs to location variables. This can be useful in the event
the submitter has erroneously selected all variables.
Returns
-------
ds : xarray.Dataset
Xarray dataset containing new quality control variables
Examples
--------
.. code-block:: python
from act.qc.arm import add_dqr_to_qc
ds = add_dqr_to_qc(ds, variable=['temp_mean', 'atmos_pressure'])
"""
# DQR Webservice goes off datastreams, pull from the dataset
if 'datastream' in ds.attrs:
datastream = ds.attrs['datastream']
elif '_datastream' in ds.attrs:
datastream = ds.attrs['_datastream']
else:
raise ValueError('Dataset does not have datastream attribute')
if datastream == DEFAULT_DATASTREAM_NAME:
raise ValueError("'datastream' name required for DQR service set to default value "
f"{datastream}. Unable to perform DQR service query.")
# Clean up QC to conform to CF conventions
if cleanup_qc:
ds.clean.cleanup()
# In order to properly flag data, get all variables if None. Exclude QC variables.
if variable is None:
variable = list(set(ds.data_vars) - set(ds.clean.matched_qc_variables))
# Check to ensure variable is list
if not isinstance(variable, (list, tuple)):
variable = [variable]
# Loop through each variable and call web service for that variable
loc_vars = ['lat', 'lon', 'alt', 'latitude', 'longitude', 'altitude']
for var_name in variable:
if skip_location_vars:
if var_name in loc_vars:
continue
# Create URL
url = 'http://www.archive.arm.gov/dqrws/ARMDQR?datastream='
url += datastream
url += '&varname=' + var_name
url += ''.join(
[
'&searchmetric=',
assessment,
'&dqrfields=dqrid,starttime,endtime,metric,subject',
]
)
# Call web service
req = requests.get(url)
# Check status values and raise error if not successful
status = req.status_code
if status == 400:
raise ValueError('Check parameters')
if status == 500:
raise ValueError('DQR Webservice Temporarily Down')
# Get data and run through each dqr
dqrs = req.text.splitlines()
time = ds['time'].values
dqr_results = {}
for line in dqrs:
line = line.split('|')
dqr_no = line[0]
# Exclude DQRs if in list
if exclude is not None and dqr_no in exclude:
continue
# Only include if in include list
if include is not None and dqr_no not in include:
continue
starttime = np.datetime64(dt.datetime.utcfromtimestamp(int(line[1])))
endtime = np.datetime64(dt.datetime.utcfromtimestamp(int(line[2])))
ind = np.where((time >= starttime) & (time <= endtime))
if ind[0].size == 0:
continue
if 'time' not in ds[var_name].dims:
ind = np.where((ds[var_name].values == ds[var_name].values) | (np.isnan(ds[var_name].values)))
if dqr_no in dqr_results.keys():
dqr_results[dqr_no]['index'] = np.append(dqr_results[dqr_no]['index'], ind)
else:
dqr_results[dqr_no] = {
'index': ind,
'test_assessment': line[3],
'test_meaning': ': '.join([dqr_no, line[-1]]),
}
if dqr_link:
print_url = 'https://adc.arm.gov/ArchiveServices/DQRService?dqrid=' + str(dqr_no)
print(dqr_no, '-', line[3], ':', print_url)
for key, value in dqr_results.items():
try:
ds.qcfilter.add_test(
var_name,
index=value['index'],
test_meaning=value['test_meaning'],
test_assessment=value['test_assessment'],
)
except IndexError:
print(f"Skipping '{var_name}' DQR application because of IndexError")
if normalize_assessment:
ds.clean.normalize_assessment(variables=var_name)
return ds
<|code_end|>
|
Histogram Display Examples
* ACT version: 1.4.5
* Python version: 3.10.8
* Operating System: macOS Ventura 13.3.1
### Description
Within the [histogram display](https://github.com/ARM-DOE/ACT/blob/main/act/plotting/histogramdisplay.py) module there are two plotting functions that do not have associated examples within our [Example Gallery](https://arm-doe.github.io/ACT/source/auto_examples/index.html).
These functions are:
- plot_size_distribution
- plot_heatmap (though the [spatial contour plot](https://arm-doe.github.io/ACT/source/auto_examples/plotting/plot_contour.html#sphx-glr-source-auto-examples-plotting-plot-contour-py) example uses this function; maybe we can add tags to examples for the functions used?)
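For reference, here is a minimal sketch of what a gallery example for plot_size_distribution could look like. It builds a small synthetic dataset so it does not depend on a particular ARM sample file; the variable name, units, and the dict-style display construction are illustrative assumptions, and it assumes the DistributionDisplay class shown below is exported from act.plotting.
```python
"""Sketch of a possible gallery example for DistributionDisplay.plot_size_distribution."""
import numpy as np
import xarray as xr
import matplotlib.pyplot as plt
import act

# Build a tiny synthetic dataset: one spectrum per time step over 20 size bins.
diameters = np.linspace(0.1, 1.0, 20)                         # hypothetical bin centers
spectrum = np.exp(-((diameters - 0.4) ** 2) / 0.02)[None, :]  # shape (time, bin)
ds = xr.Dataset(
    {'number_concentration': (('time', 'bin'), spectrum)},
    coords={'time': np.array(['2019-01-01T00:00:00'], dtype='datetime64[ns]')},
)
ds['number_concentration'].attrs['units'] = '1/cm^3'

# Plot the spectrum for the single time step in the dataset.
display = act.plotting.DistributionDisplay({'example': ds})
display.plot_size_distribution('number_concentration', diameters,
                               time=np.datetime64('2019-01-01T00:00:00'))
plt.show()
```
A plot_heatmap example could follow the same pattern, histogramming two time series variables against each other the way the spatial contour example already does internally.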
|
act/plotting/distributiondisplay.py
<|code_start|>""" Module for Distribution Plotting. """
import matplotlib.pyplot as plt
import numpy as np
import xarray as xr
from ..utils import datetime_utils as dt_utils
from .plot import Display
class DistributionDisplay(Display):
"""
This class is used to make distribution related plots. It is inherited from Display
and therefore contains all of Display's attributes and methods.
Examples
--------
To create a DistributionDisplay with 3 rows, simply do:
.. code-block:: python
ds = act.read_netcdf(the_file)
disp = act.plotting.DistributionDisplay(ds, subplot_shape=(3,), figsize=(15, 5))
The DistributionDisplay constructor takes in the same keyword arguments as
plt.subplots. For more information on the plt.subplots keyword arguments,
see the `matplotlib documentation
<https://matplotlib.org/api/_as_gen/matplotlib.pyplot.subplots.html>`_.
If no subplot_shape is provided, then no figure or axis will be created
until add_subplots or plots is called.
"""
def __init__(self, ds, subplot_shape=(1,), ds_name=None, **kwargs):
super().__init__(ds, subplot_shape, ds_name, **kwargs)
def set_xrng(self, xrng, subplot_index=(0,)):
"""
Sets the x range of the plot.
Parameters
----------
xrng : 2 number array
The x limits of the plot.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
"""
if self.axes is None:
raise RuntimeError('set_xrng requires the plot to be displayed.')
if not hasattr(self, 'xrng') and len(self.axes.shape) == 2:
self.xrng = np.zeros((self.axes.shape[0], self.axes.shape[1], 2), dtype='datetime64[D]')
elif not hasattr(self, 'xrng') and len(self.axes.shape) == 1:
self.xrng = np.zeros((self.axes.shape[0], 2), dtype='datetime64[D]')
self.axes[subplot_index].set_xlim(xrng)
self.xrng[subplot_index, :] = np.array(xrng)
def set_yrng(self, yrng, subplot_index=(0,)):
"""
Sets the y range of the plot.
Parameters
----------
yrng : 2 number array
The y limits of the plot.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
"""
if self.axes is None:
raise RuntimeError('set_yrng requires the plot to be displayed.')
if not hasattr(self, 'yrng') and len(self.axes.shape) == 2:
self.yrng = np.zeros((self.axes.shape[0], self.axes.shape[1], 2))
elif not hasattr(self, 'yrng') and len(self.axes.shape) == 1:
self.yrng = np.zeros((self.axes.shape[0], 2))
if yrng[0] == yrng[1]:
yrng[1] = yrng[1] + 1
self.axes[subplot_index].set_ylim(yrng)
self.yrng[subplot_index, :] = yrng
def _get_data(self, dsname, fields):
if isinstance(fields, str):
fields = [fields]
return self._ds[dsname][fields].dropna('time')
def plot_stacked_bar_graph(
self,
field,
dsname=None,
bins=10,
sortby_field=None,
sortby_bins=None,
subplot_index=(0,),
set_title=None,
density=False,
hist_kwargs=dict(),
**kwargs,
):
"""
This procedure will plot a stacked bar graph of a histogram.
Parameters
----------
field : str
The name of the field to take the histogram of.
dsname : str or None
The name of the datastream the field is contained in. Set
to None to let ACT automatically determine this.
bins : array-like or int
The histogram bin boundaries to use. If not specified, numpy's
default 10 is used.
sortby_field : str or None
Set this option to a field name in order to sort the histograms
by a given field parameter. For example, one can sort histograms of CO2
concentration by temperature.
sortby_bins : array-like or None
The bins to sort the histograms by.
subplot_index : tuple
The subplot index to place the plot in
set_title : str
The title of the plot.
density : bool
Set to True to plot a p.d.f. instead of a frequency histogram.
hist_kwargs : dict
Additional keyword arguments to pass to numpy histogram.
Other keyword arguments will be passed into :func:`matplotlib.pyplot.bar`.
Returns
-------
return_dict : dict
A dictionary containing the plot axis handle, bin boundaries, and
generated histogram.
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
+ 'or more datasets in the TimeSeriesDisplay '
+ 'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
if sortby_field is not None:
ds = self._get_data(dsname, [field, sortby_field])
xdata, ydata = ds[field], ds[sortby_field]
else:
xdata = self._get_data(dsname, field)[field]
if 'units' in xdata.attrs:
xtitle = ''.join(['(', xdata.attrs['units'], ')'])
else:
xtitle = field
if sortby_bins is None and sortby_field is not None:
# We will default the y direction to have the same # of bins as x
sortby_bins = np.linspace(ydata.values.min(), ydata.values.max(), len(bins))
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
if sortby_field is not None:
if 'units' in ydata.attrs:
ytitle = ''.join(['(', ydata.attrs['units'], ')'])
else:
ytitle = field
if sortby_bins is None:
my_hist, x_bins, y_bins = np.histogram2d(
xdata.values.flatten(), ydata.values.flatten(), density=density,
bins=bins,
**hist_kwargs)
else:
my_hist, x_bins, y_bins = np.histogram2d(
xdata.values.flatten(),
ydata.values.flatten(),
density=density,
bins=[bins, sortby_bins],
**hist_kwargs)
x_inds = (x_bins[:-1] + x_bins[1:]) / 2.0
self.axes[subplot_index].bar(
x_inds,
my_hist[:, 0].flatten(),
label=(str(y_bins[0]) + ' to ' + str(y_bins[1])),
**kwargs,
)
for i in range(1, len(y_bins) - 1):
self.axes[subplot_index].bar(
x_inds,
my_hist[:, i].flatten(),
bottom=my_hist[:, i - 1],
label=(str(y_bins[i]) + ' to ' + str(y_bins[i + 1])),
**kwargs,
)
self.axes[subplot_index].legend()
else:
my_hist, bins = np.histogram(xdata.values.flatten(), bins=bins,
density=density, **hist_kwargs)
x_inds = (bins[:-1] + bins[1:]) / 2.0
self.axes[subplot_index].bar(x_inds, my_hist)
# Set Title
if set_title is None:
set_title = ' '.join(
[
dsname,
field,
'on',
dt_utils.numpy_to_arm_date(self._ds[dsname].time.values[0]),
]
)
self.axes[subplot_index].set_title(set_title)
self.axes[subplot_index].set_ylabel('count')
self.axes[subplot_index].set_xlabel(xtitle)
return_dict = {}
return_dict['plot_handle'] = self.axes[subplot_index]
if 'x_bins' in locals():
return_dict['x_bins'] = x_bins
return_dict['y_bins'] = y_bins
else:
return_dict['bins'] = bins
return_dict['histogram'] = my_hist
return return_dict
def plot_size_distribution(
self, field, bins, time=None, dsname=None, subplot_index=(0,), set_title=None, **kwargs
):
"""
This procedure plots a stairstep plot of a size distribution. This is
useful for plotting size distributions and waveforms.
Parameters
----------
field : str
The name of the field to plot the spectrum from.
bins : str or array-like
The name of the field that stores the bins for the spectra.
time : none or datetime
If None, spectra to plot will be automatically determined.
Otherwise, specify this field for the time period to plot.
dsname : str
The name of the Dataset to plot. Set to None to have
ACT automatically determine this.
subplot_index : tuple
The subplot index to place the plot in.
set_title : str or None
Use this to set the title.
Additional keyword arguments will be passed into :func:`matplotlib.pyplot.step`
Returns
-------
ax : matplotlib axis handle
The matplotlib axis handle referring to the plot.
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
+ 'or more datasets in the TimeSeriesDisplay '
+ 'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
xdata = self._get_data(dsname, field)[field]
if isinstance(bins, str):
bins = self._ds[dsname][bins]
else:
bins = xr.DataArray(bins)
if 'units' in bins.attrs:
xtitle = ''.join(['(', bins.attrs['units'], ')'])
else:
xtitle = 'Bin #'
if 'units' in xdata.attrs:
ytitle = ''.join(['(', xdata.attrs['units'], ')'])
else:
ytitle = field
if len(xdata.dims) > 1 and time is None:
raise ValueError(
'Input data has more than one dimension, ' + 'you must specify a time to plot!'
)
elif len(xdata.dims) > 1:
xdata = xdata.sel(time=time, method='nearest')
if len(bins.dims) > 1 or len(bins.values) != len(xdata.values):
raise ValueError(
'Bins must be a one dimensional field whose '
+ 'length is equal to the field length!'
)
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
# Set Title
if set_title is None:
set_title = ' '.join(
[
dsname,
field,
'on',
dt_utils.numpy_to_arm_date(self._ds[dsname].time.values[0]),
]
)
self.axes[subplot_index].set_title(set_title)
self.axes[subplot_index].step(bins.values, xdata.values, **kwargs)
self.axes[subplot_index].set_xlabel(xtitle)
self.axes[subplot_index].set_ylabel(ytitle)
return self.axes[subplot_index]
def plot_stairstep_graph(
self,
field,
dsname=None,
bins=10,
sortby_field=None,
sortby_bins=None,
subplot_index=(0,),
set_title=None,
density=False,
hist_kwargs=dict(),
**kwargs,
):
"""
This procedure will plot a stairstep plot of a histogram.
Parameters
----------
field : str
The name of the field to take the histogram of.
dsname : str or None
The name of the datastream the field is contained in. Set
to None to let ACT automatically determine this.
bins : array-like or int
The histogram bin boundaries to use. If not specified, numpy's
default 10 is used.
sortby_field : str or None
Set this option to a field name in order to sort the histograms
by a given field parameter. For example, one can sort histograms of CO2
concentration by temperature.
sortby_bins : array-like or None
The bins to sort the histograms by.
subplot_index : tuple
The subplot index to place the plot in.
set_title : str
The title of the plot.
density : bool
Set to True to plot a p.d.f. instead of a frequency histogram.
hist_kwargs : dict
Additional keyword arguments to pass to numpy histogram.
Other keyword arguments will be passed into :func:`matplotlib.pyplot.step`.
Returns
-------
return_dict : dict
A dictionary containing the plot axis handle, bin boundaries, and
generated histogram.
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
+ 'or more datasets in the TimeSeriesDisplay '
+ 'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
xdata = self._get_data(dsname, field)[field]
if 'units' in xdata.attrs:
xtitle = ''.join(['(', xdata.attrs['units'], ')'])
else:
xtitle = field
if sortby_field is not None:
ydata = self._ds[dsname][sortby_field]
if sortby_bins is None and sortby_field is not None:
# We will default the y direction to have the same # of bins as x
sortby_bins = np.linspace(ydata.values.min(), ydata.values.max(), len(bins))
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
if sortby_field is not None:
if 'units' in ydata.attrs:
ytitle = ''.join(['(', ydata.attrs['units'], ')'])
else:
ytitle = field
if sortby_bins is None:
my_hist, x_bins, y_bins = np.histogram2d(
xdata.values.flatten(), ydata.values.flatten(), bins=bins,
density=density, **hist_kwargs)
else:
my_hist, x_bins, y_bins = np.histogram2d(
xdata.values.flatten(),
ydata.values.flatten(),
density=density,
bins=[bins, sortby_bins],
**hist_kwargs
)
x_inds = (x_bins[:-1] + x_bins[1:]) / 2.0
self.axes[subplot_index].step(
x_inds,
my_hist[:, 0].flatten(),
label=(str(y_bins[0]) + ' to ' + str(y_bins[1])),
**kwargs,
)
for i in range(1, len(y_bins) - 1):
self.axes[subplot_index].step(
x_inds,
my_hist[:, i].flatten(),
label=(str(y_bins[i]) + ' to ' + str(y_bins[i + 1])),
**kwargs,
)
self.axes[subplot_index].legend()
else:
my_hist, bins = np.histogram(xdata.values.flatten(), bins=bins,
density=density, **hist_kwargs)
x_inds = (bins[:-1] + bins[1:]) / 2.0
self.axes[subplot_index].step(x_inds, my_hist, **kwargs)
# Set Title
if set_title is None:
set_title = ' '.join(
[
dsname,
field,
'on',
dt_utils.numpy_to_arm_date(self._ds[dsname].time.values[0]),
]
)
self.axes[subplot_index].set_title(set_title)
self.axes[subplot_index].set_ylabel('count')
self.axes[subplot_index].set_xlabel(xtitle)
return_dict = {}
return_dict['plot_handle'] = self.axes[subplot_index]
if 'x_bins' in locals():
return_dict['x_bins'] = x_bins
return_dict['y_bins'] = y_bins
else:
return_dict['bins'] = bins
return_dict['histogram'] = my_hist
return return_dict
def plot_heatmap(
self,
x_field,
y_field,
dsname=None,
x_bins=None,
y_bins=None,
subplot_index=(0,),
set_title=None,
density=False,
set_shading='auto',
hist_kwargs=dict(),
**kwargs,
):
"""
This procedure will plot a heatmap of a histogram from 2 variables.
Parameters
----------
x_field : str
The name of the field to take the histogram of on the X axis.
y_field : str
The name of the field to take the histogram of on the Y axis.
dsname : str or None
The name of the datastream the field is contained in. Set
to None to let ACT automatically determine this.
x_bins : array-like or None
The histogram bin boundaries to use for the variable on the X axis.
Set to None to use numpy's default boundaries.
y_bins : array-like or None
The histogram bin boundaries to use for the variable on the Y axis.
Set to None to use numpy's default boundaries.
subplot_index : tuple
The subplot index to place the plot in
set_title : str
The title of the plot.
density : bool
Set to True to plot a p.d.f. instead of a frequency histogram.
set_shading : string
Option to set the matplotlib.pcolormesh shading parameter.
Defaults to 'auto'.
hist_kwargs : dict
Additional keyword arguments to pass to numpy histogram.
Other keyword arguments will be passed into :func:`matplotlib.pyplot.pcolormesh`.
Returns
-------
return_dict : dict
A dictionary containing the plot axis handle, bin boundaries, and
generated histogram.
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
ds = self._get_data(dsname, [x_field, y_field])
xdata, ydata = ds[x_field], ds[y_field]
if 'units' in xdata.attrs:
xtitle = ''.join(['(', xdata.attrs['units'], ')'])
else:
xtitle = x_field
if x_bins is not None and y_bins is None:
# We will default the y direction to have the same number of bins as x
y_bins = np.linspace(ydata.values.min(), ydata.values.max(), len(x_bins))
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
if 'units' in ydata.attrs:
ytitle = ''.join(['(', ydata.attrs['units'], ')'])
else:
ytitle = y_field
if x_bins is None:
my_hist, x_bins, y_bins = np.histogram2d(
xdata.values.flatten(), ydata.values.flatten(), density=density,
**hist_kwargs)
else:
my_hist, x_bins, y_bins = np.histogram2d(
xdata.values.flatten(),
ydata.values.flatten(),
density=density,
bins=[x_bins, y_bins],
**hist_kwargs
)
x_inds = (x_bins[:-1] + x_bins[1:]) / 2.0
y_inds = (y_bins[:-1] + y_bins[1:]) / 2.0
xi, yi = np.meshgrid(x_inds, y_inds, indexing='ij')
mesh = self.axes[subplot_index].pcolormesh(xi, yi, my_hist, shading=set_shading, **kwargs)
# Set Title
if set_title is None:
set_title = ' '.join(
[
dsname,
'on',
dt_utils.numpy_to_arm_date(self._ds[dsname].time.values[0]),
]
)
self.axes[subplot_index].set_title(set_title)
self.axes[subplot_index].set_ylabel(ytitle)
self.axes[subplot_index].set_xlabel(xtitle)
self.add_colorbar(mesh, title='count', subplot_index=subplot_index)
return_dict = {}
return_dict['plot_handle'] = self.axes[subplot_index]
return_dict['x_bins'] = x_bins
return_dict['y_bins'] = y_bins
return_dict['histogram'] = my_hist
return return_dict
def set_ratio_line(self, subplot_index=(0, )):
"""
Sets the 1:1 ratio line.
Parameters
----------
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
"""
if self.axes is None:
raise RuntimeError('set_ratio_line requires the plot to be displayed.')
# Define the xticks of the figure
xlims = self.axes[subplot_index].get_xticks()
ratio = np.linspace(xlims[0], xlims[-1])
self.axes[subplot_index].plot(ratio, ratio, 'k--')
def scatter(
self,
x_field,
y_field,
m_field=None,
dsname=None,
cbar_label=None,
set_title=None,
subplot_index=(0,),
**kwargs,
):
"""
This procedure will produce a scatter plot from 2 variables.
Parameters
----------
x_field : str
The name of the field to display on the X axis.
y_field : str
The name of the field to display on the Y axis.
m_field : str
The name of the field to display on the markers.
cbar_label : str
The desired name to plot for the colorbar
set_title : str
The desired title for the plot.
Default title is created from the datastream.
dsname : str or None
The name of the datastream the field is contained in. Set
to None to let ACT automatically determine this.
subplot_index : tuple
The subplot index to place the plot in
Other keyword arguments will be passed into :func:`matplotlib.pyplot.scatter`.
Returns
-------
ax : matplotlib axis handle
The matplotlib axis handle of the plot
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
if m_field is None:
mdata = None
ds = self._get_data(dsname, [x_field, y_field])
xdata, ydata = ds[x_field], ds[y_field]
else:
ds = self._get_data(dsname, [x_field, y_field, m_field])
xdata, ydata, mdata = ds[x_field], ds[y_field], ds[m_field]
# Define the x-axis label. If units are available, plot.
if 'units' in xdata.attrs:
xtitle = x_field + ''.join([' (', xdata.attrs['units'], ')'])
else:
xtitle = x_field
# Define the y-axis label. If units are available, plot
if 'units' in ydata.attrs:
ytitle = y_field + ''.join([' (', ydata.attrs['units'], ')'])
else:
ytitle = y_field
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
# Define the axes for the figure
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
# Display the scatter plot, pass keyword args for unspecified attributes
scc = self.axes[subplot_index].scatter(xdata,
ydata,
c=mdata,
**kwargs
)
# Set Title
if set_title is None:
set_title = ' '.join(
[
dsname,
'on',
dt_utils.numpy_to_arm_date(self._ds[dsname].time.values[0]),
]
)
# Check to see if a colorbar label was set
if mdata is not None:
if cbar_label is None:
# Define the colorbar label. If units are available, include them
if 'units' in ydata.attrs:
ztitle = m_field + ''.join([' (', mdata.attrs['units'], ')'])
else:
ztitle = m_field
else:
ztitle = cbar_label
# Plot the colorbar
cbar = plt.colorbar(scc)
cbar.ax.set_ylabel(ztitle)
# Define the axis title, x-axis label, y-axis label
self.axes[subplot_index].set_title(set_title)
self.axes[subplot_index].set_ylabel(ytitle)
self.axes[subplot_index].set_xlabel(xtitle)
return self.axes[subplot_index]
def violin(self,
field,
positions=None,
dsname=None,
vert=True,
showmeans=True,
showmedians=True,
showextrema=True,
subplot_index=(0,),
set_title=None,
**kwargs,
):
"""
This procedure will produce a violin plot for the selected
field (or fields).
Parameters
----------
field : str or list
The name of the field (or fields) to display on the X axis.
positions : array-like, Default: None
The positions of the ticks along dependent axis.
dsname : str or None
The name of the datastream the field is contained in. Set
to None to let ACT automatically determine this.
vert : Boolean, Default: True
Display the violin plot vertically. False will display it horizontally.
showmeans : Boolean; Default: True
If True, will display the mean of the datastream.
showmedians : Boolean; Default: True
If True, will display the median of the datastream.
showextrema : Boolean; Default: True
If True, will display the extremes of the datastream.
subplot_index : tuple
The subplot index to place the plot in
set_title : str
The title of the plot.
Other keyword arguments will be passed into :func:`matplotlib.pyplot.violinplot`.
Returns
-------
ax : matplotlib axis handle
The matplotlib axis handle of the plot
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
if dsname is None:
dsname = list(self._ds.keys())[0]
ds = self._get_data(dsname, field)
ndata = ds[field]
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
# Define the axes for the figure
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
# Define the axis label. If units are available, plot.
if 'units' in ndata.attrs:
axtitle = field + ''.join([' (', ndata.attrs['units'], ')'])
else:
axtitle = field
# Display the violin plot, pass keyword args for unspecified attributes
scc = self.axes[subplot_index].violinplot(ndata,
positions=positions,
vert=vert,
showmeans=showmeans,
showmedians=showmedians,
showextrema=showextrema,
**kwargs
)
if showmeans is True:
scc['cmeans'].set_edgecolor('red')
scc['cmeans'].set_label('mean')
if showmedians is True:
scc['cmedians'].set_edgecolor('black')
scc['cmedians'].set_label('median')
# Set Title
if set_title is None:
set_title = ' '.join(
[
dsname,
'on',
dt_utils.numpy_to_arm_date(self._ds[dsname].time.values[0]),
]
)
# Define the axis title, x-axis label, y-axis label
self.axes[subplot_index].set_title(set_title)
if vert is True:
self.axes[subplot_index].set_ylabel(axtitle)
if positions is None:
self.axes[subplot_index].set_xticks([])
else:
self.axes[subplot_index].set_xlabel(axtitle)
if positions is None:
self.axes[subplot_index].set_yticks([])
return self.axes[subplot_index]
<|code_end|>
examples/plotting/plot_heatmap.py
<|code_start|><|code_end|>
examples/plotting/plot_size_distribution.py
<|code_start|><|code_end|>
|
act/plotting/distributiondisplay.py
<|code_start|>""" Module for Distribution Plotting. """
import matplotlib.pyplot as plt
import numpy as np
import xarray as xr
import pandas as pd
from ..utils import datetime_utils as dt_utils
from .plot import Display
class DistributionDisplay(Display):
"""
This class is used to make distribution related plots. It is inherited from Display
and therefore contains all of Display's attributes and methods.
Examples
--------
To create a DistributionDisplay with 3 rows, simply do:
.. code-block:: python
ds = act.read_netcdf(the_file)
disp = act.plotting.DistributionDisplay(ds, subplot_shape=(3,), figsize=(15, 5))
The DistributionDisplay constructor takes in the same keyword arguments as
plt.subplots. For more information on the plt.subplots keyword arguments,
see the `matplotlib documentation
<https://matplotlib.org/api/_as_gen/matplotlib.pyplot.subplots.html>`_.
If no subplot_shape is provided, then no figure or axis will be created
until add_subplots or plots is called.
"""
def __init__(self, ds, subplot_shape=(1,), ds_name=None, **kwargs):
super().__init__(ds, subplot_shape, ds_name, **kwargs)
def set_xrng(self, xrng, subplot_index=(0,)):
"""
Sets the x range of the plot.
Parameters
----------
xrng : 2 number array
The x limits of the plot.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
"""
if self.axes is None:
raise RuntimeError('set_xrng requires the plot to be displayed.')
if not hasattr(self, 'xrng') and len(self.axes.shape) == 2:
self.xrng = np.zeros((self.axes.shape[0], self.axes.shape[1], 2), dtype='datetime64[D]')
elif not hasattr(self, 'xrng') and len(self.axes.shape) == 1:
self.xrng = np.zeros((self.axes.shape[0], 2), dtype='datetime64[D]')
self.axes[subplot_index].set_xlim(xrng)
self.xrng[subplot_index, :] = np.array(xrng)
def set_yrng(self, yrng, subplot_index=(0,)):
"""
Sets the y range of the plot.
Parameters
----------
yrng : 2 number array
The y limits of the plot.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
"""
if self.axes is None:
raise RuntimeError('set_yrng requires the plot to be displayed.')
if not hasattr(self, 'yrng') and len(self.axes.shape) == 2:
self.yrng = np.zeros((self.axes.shape[0], self.axes.shape[1], 2))
elif not hasattr(self, 'yrng') and len(self.axes.shape) == 1:
self.yrng = np.zeros((self.axes.shape[0], 2))
if yrng[0] == yrng[1]:
yrng[1] = yrng[1] + 1
self.axes[subplot_index].set_ylim(yrng)
self.yrng[subplot_index, :] = yrng
def _get_data(self, dsname, fields):
if isinstance(fields, str):
fields = [fields]
return self._ds[dsname][fields].dropna('time')
def plot_stacked_bar_graph(
self,
field,
dsname=None,
bins=10,
sortby_field=None,
sortby_bins=None,
subplot_index=(0,),
set_title=None,
density=False,
hist_kwargs=dict(),
**kwargs,
):
"""
This procedure will plot a stacked bar graph of a histogram.
Parameters
----------
field : str
The name of the field to take the histogram of.
dsname : str or None
The name of the datastream the field is contained in. Set
to None to let ACT automatically determine this.
bins : array-like or int
The histogram bin boundaries to use. If not specified, numpy's
default 10 is used.
sortby_field : str or None
Set this option to a field name in order to sort the histograms
by a given field parameter. For example, one can sort histograms of CO2
concentration by temperature.
sortby_bins : array-like or None
The bins to sort the histograms by.
subplot_index : tuple
The subplot index to place the plot in
set_title : str
The title of the plot.
density : bool
Set to True to plot a p.d.f. instead of a frequency histogram.
hist_kwargs : dict
Additional keyword arguments to pass to numpy histogram.
Other keyword arguments will be passed into :func:`matplotlib.pyplot.bar`.
Returns
-------
return_dict : dict
A dictionary containing the plot axis handle, bin boundaries, and
generated histogram.
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
+ 'or more datasets in the TimeSeriesDisplay '
+ 'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
if sortby_field is not None:
ds = self._get_data(dsname, [field, sortby_field])
xdata, ydata = ds[field], ds[sortby_field]
else:
xdata = self._get_data(dsname, field)[field]
if 'units' in xdata.attrs:
xtitle = ''.join(['(', xdata.attrs['units'], ')'])
else:
xtitle = field
if sortby_bins is None and sortby_field is not None:
# We will default the y direction to have the same number of bins as x
sortby_bins = np.linspace(ydata.values.min(), ydata.values.max(), len(bins))
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
if sortby_field is not None:
if 'units' in ydata.attrs:
ytitle = ''.join(['(', ydata.attrs['units'], ')'])
else:
ytitle = field
if sortby_bins is None:
my_hist, x_bins, y_bins = np.histogram2d(
xdata.values.flatten(), ydata.values.flatten(), density=density,
bins=bins,
**hist_kwargs)
else:
my_hist, x_bins, y_bins = np.histogram2d(
xdata.values.flatten(),
ydata.values.flatten(),
density=density,
bins=[bins, sortby_bins],
**hist_kwargs)
x_inds = (x_bins[:-1] + x_bins[1:]) / 2.0
self.axes[subplot_index].bar(
x_inds,
my_hist[:, 0].flatten(),
label=(str(y_bins[0]) + ' to ' + str(y_bins[1])),
**kwargs,
)
for i in range(1, len(y_bins) - 1):
self.axes[subplot_index].bar(
x_inds,
my_hist[:, i].flatten(),
bottom=my_hist[:, i - 1],
label=(str(y_bins[i]) + ' to ' + str(y_bins[i + 1])),
**kwargs,
)
self.axes[subplot_index].legend()
else:
my_hist, bins = np.histogram(xdata.values.flatten(), bins=bins,
density=density, **hist_kwargs)
x_inds = (bins[:-1] + bins[1:]) / 2.0
self.axes[subplot_index].bar(x_inds, my_hist)
# Set Title
if set_title is None:
set_title = ' '.join(
[
dsname,
field,
'on',
dt_utils.numpy_to_arm_date(self._ds[dsname].time.values[0]),
]
)
self.axes[subplot_index].set_title(set_title)
self.axes[subplot_index].set_ylabel('count')
self.axes[subplot_index].set_xlabel(xtitle)
return_dict = {}
return_dict['plot_handle'] = self.axes[subplot_index]
if 'x_bins' in locals():
return_dict['x_bins'] = x_bins
return_dict['y_bins'] = y_bins
else:
return_dict['bins'] = bins
return_dict['histogram'] = my_hist
return return_dict
def plot_size_distribution(
self, field, bins, time=None, dsname=None, subplot_index=(0,), set_title=None, **kwargs
):
"""
This procedure plots a stairstep plot of a size distribution. This is
useful for plotting size distributions and waveforms.
Parameters
----------
field : str
The name of the field to plot the spectrum from.
bins : str or array-like
The name of the field that stores the bins for the spectra.
time : none or datetime
If None, spectra to plot will be automatically determined.
Otherwise, specify this field for the time period to plot.
dsname : str
The name of the Dataset to plot. Set to None to have
ACT automatically determine this.
subplot_index : tuple
The subplot index to place the plot in.
set_title : str or None
Use this to set the title.
Additional keyword arguments will be passed into :func:`matplotlib.pyplot.step`
Returns
-------
ax : matplotlib axis handle
The matplotlib axis handle referring to the plot.
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
+ 'or more datasets in the TimeSeriesDisplay '
+ 'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
xdata = self._get_data(dsname, field)[field]
if isinstance(bins, str):
bins = self._ds[dsname][bins]
else:
bins = xr.DataArray(bins)
if 'units' in bins.attrs:
xtitle = ''.join(['(', bins.attrs['units'], ')'])
else:
xtitle = 'Bin #'
if 'units' in xdata.attrs:
ytitle = ''.join(['(', xdata.attrs['units'], ')'])
else:
ytitle = field
if len(xdata.dims) > 1 and time is None:
raise ValueError(
'Input data has more than one dimension, ' + 'you must specify a time to plot!'
)
elif len(xdata.dims) > 1:
xdata = xdata.sel(time=time, method='nearest')
if len(bins.dims) > 1 or len(bins.values) != len(xdata.values):
raise ValueError(
'Bins must be a one dimensional field whose '
+ 'length is equal to the field length!'
)
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
# Set Title
if set_title is None:
set_title = ' '.join(
[
dsname,
field,
'on',
dt_utils.numpy_to_arm_date(self._ds[dsname].time.values[0]),
]
)
if time is not None:
t = pd.Timestamp(time)
set_title += ''.join([' at ', ':'.join([str(t.hour), str(t.minute), str(t.second)])])
self.axes[subplot_index].set_title(set_title)
self.axes[subplot_index].step(bins.values, xdata.values, **kwargs)
self.axes[subplot_index].set_xlabel(xtitle)
self.axes[subplot_index].set_ylabel(ytitle)
return self.axes[subplot_index]
def plot_stairstep_graph(
self,
field,
dsname=None,
bins=10,
sortby_field=None,
sortby_bins=None,
subplot_index=(0,),
set_title=None,
density=False,
hist_kwargs=dict(),
**kwargs,
):
"""
This procedure will plot a stairstep plot of a histogram.
Parameters
----------
field : str
The name of the field to take the histogram of.
dsname : str or None
The name of the datastream the field is contained in. Set
to None to let ACT automatically determine this.
bins : array-like or int
The histogram bin boundaries to use. If not specified, numpy's
default 10 is used.
sortby_field : str or None
Set this option to a field name in order to sort the histograms
by a given field parameter. For example, one can sort histograms of CO2
concentration by temperature.
sortby_bins : array-like or None
The bins to sort the histograms by.
subplot_index : tuple
The subplot index to place the plot in.
set_title : str
The title of the plot.
density : bool
Set to True to plot a p.d.f. instead of a frequency histogram.
hist_kwargs : dict
Additional keyword arguments to pass to numpy histogram.
Other keyword arguments will be passed into :func:`matplotlib.pyplot.step`.
Returns
-------
return_dict : dict
A dictionary containing the plot axis handle, bin boundaries, and
generated histogram.
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
+ 'or more datasets in the TimeSeriesDisplay '
+ 'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
xdata = self._get_data(dsname, field)[field]
if 'units' in xdata.attrs:
xtitle = ''.join(['(', xdata.attrs['units'], ')'])
else:
xtitle = field
if sortby_field is not None:
ydata = self._ds[dsname][sortby_field]
if sortby_bins is None and sortby_field is not None:
# We will default the y direction to have the same number of bins as x
sortby_bins = np.linspace(ydata.values.min(), ydata.values.max(), len(bins))
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
if sortby_field is not None:
if 'units' in ydata.attrs:
ytitle = ''.join(['(', ydata.attrs['units'], ')'])
else:
ytitle = field
if sortby_bins is None:
my_hist, x_bins, y_bins = np.histogram2d(
xdata.values.flatten(), ydata.values.flatten(), bins=bins,
density=density, **hist_kwargs)
else:
my_hist, x_bins, y_bins = np.histogram2d(
xdata.values.flatten(),
ydata.values.flatten(),
density=density,
bins=[bins, sortby_bins],
**hist_kwargs
)
x_inds = (x_bins[:-1] + x_bins[1:]) / 2.0
self.axes[subplot_index].step(
x_inds,
my_hist[:, 0].flatten(),
label=(str(y_bins[0]) + ' to ' + str(y_bins[1])),
**kwargs,
)
for i in range(1, len(y_bins) - 1):
self.axes[subplot_index].step(
x_inds,
my_hist[:, i].flatten(),
label=(str(y_bins[i]) + ' to ' + str(y_bins[i + 1])),
**kwargs,
)
self.axes[subplot_index].legend()
else:
my_hist, bins = np.histogram(xdata.values.flatten(), bins=bins,
density=density, **hist_kwargs)
x_inds = (bins[:-1] + bins[1:]) / 2.0
self.axes[subplot_index].step(x_inds, my_hist, **kwargs)
# Set Title
if set_title is None:
set_title = ' '.join(
[
dsname,
field,
'on',
dt_utils.numpy_to_arm_date(self._ds[dsname].time.values[0]),
]
)
self.axes[subplot_index].set_title(set_title)
self.axes[subplot_index].set_ylabel('count')
self.axes[subplot_index].set_xlabel(xtitle)
return_dict = {}
return_dict['plot_handle'] = self.axes[subplot_index]
if 'x_bins' in locals():
return_dict['x_bins'] = x_bins
return_dict['y_bins'] = y_bins
else:
return_dict['bins'] = bins
return_dict['histogram'] = my_hist
return return_dict
def plot_heatmap(
self,
x_field,
y_field,
dsname=None,
x_bins=None,
y_bins=None,
subplot_index=(0,),
set_title=None,
density=False,
set_shading='auto',
hist_kwargs=dict(),
threshold=None,
**kwargs,
):
"""
This procedure will plot a heatmap of a histogram from 2 variables.
Parameters
----------
x_field : str
The name of the field to take the histogram of on the X axis.
y_field : str
The name of the field to take the histogram of on the Y axis.
dsname : str or None
The name of the datastream the field is contained in. Set
to None to let ACT automatically determine this.
x_bins : array-like, int, or None
The histogram bin boundaries to use for the variable on the X axis.
Set to None to use numpy's default boundaries.
If an int, will indicate the number of bins to use
y_bins : array-like, int, or None
The histogram bin boundaries to use for the variable on the Y axis.
Set to None to use numpy's default boundaries.
If an int, will indicate the number of bins to use
subplot_index : tuple
The subplot index to place the plot in
set_title : str
The title of the plot.
density : bool
Set to True to plot a p.d.f. instead of a frequency histogram.
set_shading : string
Option to set the matplotlib.pcolormesh shading parameter.
Defaults to 'auto'.
threshold : float
Value on which to threshold the histogram results for plotting.
Setting to 0 will ensure that all 0 values are removed from the plot,
making it easier to distinguish between 0 and low values.
hist_kwargs : dict
Additional keyword arguments to pass to numpy histogram.
Other keyword arguments will be passed into :func:`matplotlib.pyplot.pcolormesh`.
Returns
-------
return_dict : dict
A dictionary containing the plot axis handle, bin boundaries, and
generated histogram.
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
ds = self._get_data(dsname, [x_field, y_field])
xdata, ydata = ds[x_field], ds[y_field]
if 'units' in xdata.attrs:
xtitle = ''.join(['(', xdata.attrs['units'], ')'])
else:
xtitle = x_field
if x_bins is not None and isinstance(x_bins, int):
x_bins = np.linspace(xdata.values.min(), xdata.values.max(), x_bins)
if y_bins is not None and isinstance(x_bins, int):
y_bins = np.linspace(ydata.values.min(), ydata.values.max(), y_bins)
if x_bins is not None and y_bins is None:
# We will default the y direction to have the same number of bins as x
y_bins = np.linspace(ydata.values.min(), ydata.values.max(), len(x_bins))
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
if 'units' in ydata.attrs:
ytitle = ''.join(['(', ydata.attrs['units'], ')'])
else:
ytitle = y_field
if x_bins is None:
my_hist, x_bins, y_bins = np.histogram2d(
xdata.values.flatten(), ydata.values.flatten(), density=density,
**hist_kwargs)
else:
my_hist, x_bins, y_bins = np.histogram2d(
xdata.values.flatten(),
ydata.values.flatten(),
density=density,
bins=[x_bins, y_bins],
**hist_kwargs
)
# Adding in the ability to threshold the heatmaps
if threshold is not None:
my_hist[my_hist <= threshold] = np.nan
x_inds = (x_bins[:-1] + x_bins[1:]) / 2.0
y_inds = (y_bins[:-1] + y_bins[1:]) / 2.0
xi, yi = np.meshgrid(x_inds, y_inds, indexing='ij')
mesh = self.axes[subplot_index].pcolormesh(xi, yi, my_hist, shading=set_shading, **kwargs)
# Set Title
if set_title is None:
set_title = ' '.join(
[
dsname,
'on',
dt_utils.numpy_to_arm_date(self._ds[dsname].time.values[0]),
]
)
self.axes[subplot_index].set_title(set_title)
self.axes[subplot_index].set_ylabel(ytitle)
self.axes[subplot_index].set_xlabel(xtitle)
self.add_colorbar(mesh, title='count', subplot_index=subplot_index)
return_dict = {}
return_dict['plot_handle'] = self.axes[subplot_index]
return_dict['x_bins'] = x_bins
return_dict['y_bins'] = y_bins
return_dict['histogram'] = my_hist
return return_dict
def set_ratio_line(self, subplot_index=(0, )):
"""
Sets the 1:1 ratio line.
Parameters
----------
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
"""
if self.axes is None:
raise RuntimeError('set_ratio_line requires the plot to be displayed.')
# Define the xticks of the figure
xlims = self.axes[subplot_index].get_xticks()
ratio = np.linspace(xlims[0], xlims[-1])
self.axes[subplot_index].plot(ratio, ratio, 'k--')
def scatter(
self,
x_field,
y_field,
m_field=None,
dsname=None,
cbar_label=None,
set_title=None,
subplot_index=(0,),
**kwargs,
):
"""
This procedure will produce a scatter plot from 2 variables.
Parameters
----------
x_field : str
The name of the field to display on the X axis.
y_field : str
The name of the field to display on the Y axis.
m_field : str
The name of the field to display on the markers.
cbar_label : str
The desired name to plot for the colorbar
set_title : str
The desired title for the plot.
Default title is created from the datastream.
dsname : str or None
The name of the datastream the field is contained in. Set
to None to let ACT automatically determine this.
subplot_index : tuple
The subplot index to place the plot in
Other keyword arguments will be passed into :func:`matplotlib.pyplot.scatter`.
Returns
-------
ax : matplotlib axis handle
The matplotlib axis handle of the plot
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
if m_field is None:
mdata = None
ds = self._get_data(dsname, [x_field, y_field])
xdata, ydata = ds[x_field], ds[y_field]
else:
ds = self._get_data(dsname, [x_field, y_field, m_field])
xdata, ydata, mdata = ds[x_field], ds[y_field], ds[m_field]
# Define the x-axis label. If units are available, plot.
if 'units' in xdata.attrs:
xtitle = x_field + ''.join([' (', xdata.attrs['units'], ')'])
else:
xtitle = x_field
# Define the y-axis label. If units are available, plot
if 'units' in ydata.attrs:
ytitle = y_field + ''.join([' (', ydata.attrs['units'], ')'])
else:
ytitle = y_field
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
# Define the axes for the figure
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
# Display the scatter plot, pass keyword args for unspecified attributes
scc = self.axes[subplot_index].scatter(xdata,
ydata,
c=mdata,
**kwargs
)
# Set Title
if set_title is None:
set_title = ' '.join(
[
dsname,
'on',
dt_utils.numpy_to_arm_date(self._ds[dsname].time.values[0]),
]
)
# Check to see if a colorbar label was set
if mdata is not None:
if cbar_label is None:
# Define the colorbar label. If units are available, include them
if 'units' in mdata.attrs:
ztitle = m_field + ''.join([' (', mdata.attrs['units'], ')'])
else:
ztitle = m_field
else:
ztitle = cbar_label
# Plot the colorbar
cbar = plt.colorbar(scc)
cbar.ax.set_ylabel(ztitle)
# Define the axis title, x-axis label, y-axis label
self.axes[subplot_index].set_title(set_title)
self.axes[subplot_index].set_ylabel(ytitle)
self.axes[subplot_index].set_xlabel(xtitle)
return self.axes[subplot_index]
def violin(self,
field,
positions=None,
dsname=None,
vert=True,
showmeans=True,
showmedians=True,
showextrema=True,
subplot_index=(0,),
set_title=None,
**kwargs,
):
"""
This procedure will produce a violin plot for the selected
field (or fields).
Parameters
----------
field : str or list
The name of the field (or fields) to display on the X axis.
positions : array-like, Default: None
The positions of the ticks along dependent axis.
dsname : str or None
The name of the datastream the field is contained in. Set
to None to let ACT automatically determine this.
vert : Boolean, Default: True
Display the violin plot vertically. False will display it horizontally.
showmeans : Boolean; Default: True
If True, will display the mean of the datastream.
showmedians : Boolean; Default: True
If True, will display the median of the datastream.
showextrema : Boolean; Default: True
If True, will display the extremes of the datastream.
subplot_index : tuple
The subplot index to place the plot in
set_title : str
The title of the plot.
Other keyword arguments will be passed into :func:`matplotlib.pyplot.violinplot`.
Returns
-------
ax : matplotlib axis handle
The matplotlib axis handle of the plot
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
if dsname is None:
dsname = list(self._ds.keys())[0]
ds = self._get_data(dsname, field)
ndata = ds[field]
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
# Define the axes for the figure
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
# Define the axis label. If units are available, plot.
if 'units' in ndata.attrs:
axtitle = field + ''.join([' (', ndata.attrs['units'], ')'])
else:
axtitle = field
# Display the violin plot, pass keyword args for unspecified attributes
scc = self.axes[subplot_index].violinplot(ndata,
positions=positions,
vert=vert,
showmeans=showmeans,
showmedians=showmedians,
showextrema=showextrema,
**kwargs
)
if showmeans is True:
scc['cmeans'].set_edgecolor('red')
scc['cmeans'].set_label('mean')
if showmedians is True:
scc['cmedians'].set_edgecolor('black')
scc['cmedians'].set_label('median')
# Set Title
if set_title is None:
set_title = ' '.join(
[
dsname,
'on',
dt_utils.numpy_to_arm_date(self._ds[dsname].time.values[0]),
]
)
# Define the axis title, x-axis label, y-axis label
self.axes[subplot_index].set_title(set_title)
if vert is True:
self.axes[subplot_index].set_ylabel(axtitle)
if positions is None:
self.axes[subplot_index].set_xticks([])
else:
self.axes[subplot_index].set_xlabel(axtitle)
if positions is None:
self.axes[subplot_index].set_yticks([])
return self.axes[subplot_index]
<|code_end|>
examples/plotting/plot_heatmap.py
<|code_start|>"""
Example plot using heat maps
----------------------------
Compare MET temperature and RH using a heatmap
Author: Adam Theisen
"""
import act
import matplotlib.pyplot as plt
# Read MET data in from the test data area
ds = act.io.armfiles.read_netcdf(act.tests.EXAMPLE_MET_WILDCARD)
# Create a DistributionDisplay object to compare fields
display = act.plotting.DistributionDisplay(ds, subplot_shape=(1, 2), figsize=(12, 5))
# Plot a heatmap and scatter plot up of RH vs Temperature
# Set the number of bins for the x-axis to 25 and y to 20
title = 'Heatmap of MET RH vs Temp'
display.plot_heatmap('temp_mean', 'rh_mean', x_bins=25, y_bins=20,
threshold=0, subplot_index=(0, 0), set_title=title)
# Plot the scatter plot and shade by wind_speed
title = 'Scatter plot of MET RH vs Temp'
display.scatter('temp_mean', 'rh_mean', subplot_index=(0, 1), set_title=title, m_field='time')
plt.show()
<|code_end|>
examples/plotting/plot_size_distribution.py
<|code_start|>"""
Example size distribution plot
------------------------------
Plot a CCN droplet size distribution alongside its time series
Author: Adam Theisen
"""
import act
import matplotlib.pyplot as plt
import numpy as np
# Read CCN data in from the test data area
ds = act.io.armfiles.read_netcdf(act.tests.EXAMPLE_CCN)
# Create a DistributionDisplay object
display = act.plotting.DistributionDisplay(ds, subplot_shape=(2,), figsize=(12, 10))
# Plot the size distribution at the selected time in the first subplot
t_ind = np.datetime64('2017-09-03T15:47:31')
display.plot_size_distribution('N_CCN_dN', 'droplet_size', time=t_ind, subplot_index=(0,))
# This part shows how you can use different display types in a single plot
# by assigning the new display object to a figure and axes from the first one.
display2 = act.plotting.TimeSeriesDisplay(ds)
display2.assign_to_figure_axis(display.fig, display.axes[1])
display2.plot('N_CCN_dN')
plt.show()
<|code_end|>
|
ADD: 'extract_arm_file_info' to harvest information from lists of ARM
filenames without loading the files, thereby saving header-reading and processing time; a rough sketch of the filename-parsing idea is given below the checklist.
- [ ] Documentation reflects changes
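
For orientation only, below is a minimal, hypothetical sketch of the filename-parsing idea; the function name `sketch_extract_file_info`, the regular expression, and the returned fields are assumptions for illustration, not the requested ACT API. Standard ARM filenames such as `sgpmetE13.b1.20190101.000000.cdf` already encode the site, datastream class, facility, data level, and timestamp, so that information can be recovered from the names alone without opening any files.

```python
# Hypothetical sketch only -- illustrates parsing ARM-style filenames
# (e.g. sgpmetE13.b1.20190101.000000.cdf) without reading the files.
# The real extract_arm_file_info API and return values may differ.
import re
from datetime import datetime

ARM_NAME = re.compile(
    r'^(?P<site>[a-z]{3})'            # 3-letter site code, e.g. 'sgp'
    r'(?P<dsclass>[a-zA-Z0-9]+?)'     # datastream class, e.g. 'met'
    r'(?P<facility>[A-Z]\d{1,2})'     # facility, e.g. 'E13'
    r'\.(?P<level>[a-z]\d)'           # data level, e.g. 'b1'
    r'\.(?P<date>\d{8})\.(?P<time>\d{6})\.\w+$'
)


def sketch_extract_file_info(filenames):
    """Return a list of dicts with info parsed from ARM filenames."""
    info = []
    for name in filenames:
        match = ARM_NAME.match(name.split('/')[-1])
        if match is None:
            continue  # this sketch silently skips non-conforming names
        fields = match.groupdict()
        fields['datetime'] = datetime.strptime(
            fields.pop('date') + fields.pop('time'), '%Y%m%d%H%M%S'
        )
        info.append(fields)
    return info


print(sketch_extract_file_info(['sgpmetE13.b1.20190101.000000.cdf']))
```

The real implementation would still need to decide how to handle filenames that do not follow the standard pattern and which fields to expose.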
|
act/utils/data_utils.py
<|code_start|>"""
Module containing utilities for the data.
"""
import importlib
import warnings
import metpy
import numpy as np
import pint
import scipy.stats as stats
import xarray as xr
spec = importlib.util.find_spec('pyart')
if spec is not None:
PYART_AVAILABLE = True
else:
PYART_AVAILABLE = False
@xr.register_dataset_accessor('utils')
class ChangeUnits:
"""
Class for updating units in the dataset. Data values and units attribute
are updated in place. Coordinate variables cannot be updated in place; the
returned dataset must be used when updating coordinate variables.
"""
def __init__(self, ds):
self._ds = ds
def change_units(
self, variables=None, desired_unit=None, skip_variables=None, skip_standard=True
):
"""
Parameters
----------
variables : None, str or list of str
Variable names to attempt to change units.
desired_unit : str
Desired udunits unit string.
skip_variables : None, str or list of str
Variable names to skip. Works well when not providing a variables
keyword.
skip_standard : boolean
Flag indicating the QC variables that will not need changing are
skipped. Makes the processing faster when processing all variables
in dataset.
Returns
-------
dataset : xarray.dataset
A new dataset if the coordinate variables are updated. Required to
use the returned dataset if coordinate variables are updated,
otherwise the dataset is updated in place.
"""
if variables is not None and isinstance(variables, str):
variables = [variables]
if skip_variables is not None and isinstance(skip_variables, str):
skip_variables = [skip_variables]
if desired_unit is None:
raise ValueError("Need to provide 'desired_unit' keyword for .change_units() method")
if variables is None:
variables = list(self._ds.data_vars)
if skip_variables is not None:
variables = list(set(variables) - set(skip_variables))
for var_name in variables:
try:
if self._ds[var_name].attrs['standard_name'] == 'quality_flag':
continue
except KeyError:
pass
try:
data = convert_units(
self._ds[var_name].values,
self._ds[var_name].attrs['units'],
desired_unit,
)
try:
self._ds[var_name].values = data
self._ds[var_name].attrs['units'] = desired_unit
except ValueError:
attrs = self._ds[var_name].attrs
self._ds = self._ds.assign_coords({var_name: data})
attrs['units'] = desired_unit
self._ds[var_name].attrs = attrs
except (
KeyError,
pint.errors.DimensionalityError,
pint.errors.UndefinedUnitError,
np.core._exceptions.UFuncTypeError,
):
continue
return self._ds
def assign_coordinates(ds, coord_list):
"""
This procedure will create a new ACT dataset whose coordinates are
designated to be the variables in a given list. This helps make data
slicing via xarray and visualization easier.
Parameters
----------
ds : ACT Dataset
The ACT Dataset to modify the coordinates of.
coord_list : dict
The list of variables to assign as coordinates, given as a dictionary
whose keys are the variable name and values are the dimension name.
Returns
-------
new_ds : ACT Dataset
The new ACT Dataset with the coordinates assigned to be the given
variables.
"""
# Check to make sure that user assigned valid entries for coordinates
for coord in coord_list.keys():
if coord not in ds.variables.keys():
raise KeyError(coord + ' is not a variable in the Dataset.')
if ds.dims[coord_list[coord]] != len(ds.variables[coord]):
raise IndexError(
coord + ' must have the same ' + 'value as length of ' + coord_list[coord]
)
new_ds_dict = {}
for variable in ds.variables.keys():
my_coord_dict = {}
dataarray = ds[variable]
if len(dataarray.dims) > 0:
for coord in coord_list.keys():
if coord_list[coord] in dataarray.dims:
my_coord_dict[coord_list[coord]] = ds[coord]
if variable not in my_coord_dict.keys() and variable not in ds.dims:
the_dataarray = xr.DataArray(dataarray.data, coords=my_coord_dict, dims=dataarray.dims)
new_ds_dict[variable] = the_dataarray
new_ds = xr.Dataset(new_ds_dict, coords=my_coord_dict)
return new_ds
def add_in_nan(time, data):
"""
This procedure adds in NaNs when there is a larger than expected time step.
This is useful for timeseries where there is a gap in data and a NaN
value is needed to stop plotting from connecting data across the large data gap.
Parameters
----------
time : 1D array of numpy datetime64 or Xarray DataArray of datetime64
Times in the timeseries.
data : 1D or 2D numpy array or Xarray DataArray
Array containing the data. The 0 axis corresponds to time.
Returns
-------
time : numpy array or Xarray DataArray
The array containing the new times including a NaN filled
sample or slice if multi-dimensional.
The intervals are determined by the mode of the timestep in *time*.
data : numpy array or Xarray DataArray
The array containing the NaN-inserted data.
"""
time_is_DataArray = False
data_is_DataArray = False
if isinstance(time, xr.core.dataarray.DataArray):
time_is_DataArray = True
time_attributes = time.attrs
time_dims = time.dims
if isinstance(data, xr.core.dataarray.DataArray):
data_is_DataArray = True
data_attributes = data.attrs
data_dims = data.dims
# Return if time dimension is only size one since we can't do differences.
if time.size > 2:
data = np.asarray(data)
time = np.asarray(time)
# Not sure if we need to set to second data type to make it work better.
# Leaving code in here in case we need to update.
# diff = np.diff(time.astype('datetime64[s]'), 1)
diff = np.diff(time, 1)
# Wrapping in a try to catch error while switching between numpy 1.10 to 1.11
try:
mode = stats.mode(diff, keepdims=True).mode[0]
except TypeError:
mode = stats.mode(diff).mode[0]
index = np.where(diff > (2.0 * mode))
offset = 0
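# Insert a midpoint time and a NaN data sample at each detected gap;
# offset accounts for the index shift caused by earlier insertions.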
for i in index[0]:
corr_i = i + offset
time_added = time[corr_i] + (time[corr_i + 1] - time[corr_i]) / 2.0
time = np.insert(time, corr_i + 1, time_added)
data = np.insert(data, corr_i + 1, np.nan, axis=0)
offset += 1
if time_is_DataArray:
time = xr.DataArray(time, attrs=time_attributes, dims=time_dims)
if data_is_DataArray:
data = xr.DataArray(data, attrs=data_attributes, dims=data_dims)
return time, data
def get_missing_value(
ds,
variable,
default=-9999,
add_if_missing_in_ds=False,
use_FillValue=False,
nodefault=False,
):
"""
Function to get missing value from missing_value or _FillValue attribute.
Works well with catching errors and allows for a default value when a
missing value is not listed in the dataset. You may get strange results
because xarray will automatically convert all missing_value or
_FillValue to NaN and then remove the missing_value and
_FillValue variable attribute when reading data with default settings.
Parameters
----------
ds : xarray.Dataset
Xarray dataset containing data variable.
variable : str
Variable name to use for getting missing value.
default : int or float
Default value to use if missing value attribute is not in dataset.
add_if_missing_in_ds : bool
Boolean to add to the dataset if does not exist. Default is False.
use_FillValue : bool
Boolean to use _FillValue instead of missing_value. If missing_value
does exist and _FillValue does not will add _FillValue
set to missing_value value.
nodefault : bool
Option to check whether the variable has a missing value set without
returning the default. If a missing value is found it will be returned,
otherwise None will be returned.
Returns
-------
missing : scalar int or float (or None)
Value used to indicate missing value matching type of data or None if
nodefault keyword set to True.
Examples
--------
.. code-block:: python
from act.utils import get_missing_value
missing = get_missing_value(dq_ds, "temp_mean")
print(missing)
-9999.0
"""
in_ds = False
if use_FillValue:
missing_atts = ['_FillValue', 'missing_value']
else:
missing_atts = ['missing_value', '_FillValue']
for att in missing_atts:
try:
missing = ds[variable].attrs[att]
in_ds = True
break
except (AttributeError, KeyError):
missing = default
# Check if a default value is not wanted and a missing value
# was not found.
if nodefault is True and in_ds is False:
missing = None
return missing
# Check data type and try to match missing_value to the data type of data
try:
missing = ds[variable].data.dtype.type(missing)
except KeyError:
pass
except AttributeError:
print(
('--- AttributeError: Issue trying to get data type ' + 'from "{}" data ---').format(
variable
)
)
# If requested add missing value to the dataset
if add_if_missing_in_ds and not in_ds:
try:
ds[variable].attrs[missing_atts[0]] = missing
except KeyError:
print(
('--- KeyError: Issue trying to add "{}" ' + 'attribute to "{}" ---').format(
missing_atts[0], variable
)
)
return missing
def convert_units(data, in_units, out_units):
"""
Wrapper function around library to convert data using unit strings.
Currently using pint units library. Will attempt to preserve numpy
data type, but will upconvert to numpy float64 if need to change
data type for converted values.
Parameters
----------
data : list, tuple or numpy array
Data array to be modified.
in_units : str
Units scalar string of input data array.
out_units : str
Units scalar string of desired output data array.
Returns
-------
data : numpy array
Data array converted into new units.
Examples
--------
> data = np.array([1,2,3,4,5,6])
> data = convert_units(data, 'cm', 'm')
> data
array([0.01, 0.02, 0.03, 0.04, 0.05, 0.06])
"""
# Fix historical and current incorrect usage of units.
convert_dict = {
'C': 'degC',
'F': 'degF',
'%': 'percent', # seems like pint does not like this symbol?
'1': 'unitless', # seems like pint does not like this number?
}
if in_units in convert_dict:
in_units = convert_dict[in_units]
if out_units in convert_dict:
out_units = convert_dict[out_units]
if in_units == out_units:
return data
# Instantiate the registry
ureg = pint.UnitRegistry(autoconvert_offset_to_baseunit=True)
# Add missing units
ureg.define('percent = 0.01*count = %')
ureg.define('unitless = count = 1')
if not isinstance(data, np.ndarray):
data = np.array(data)
data_type = data.dtype
data_type_kind = data.dtype.kind
# Do the conversion magic
data = (data * ureg(in_units)).to(out_units)
data = data.magnitude
# The data type may be changed by pint. This is a side effect
# of pint changing the datatype to float. Check if the converted values
# need float precision. If so leave as is, if not change back to original
# precision after checking that precision is not lost with the original
# data type.
if (
data_type_kind == 'i'
and np.nanmin(data) >= np.iinfo(data_type).min
and np.nanmax(data) <= np.iinfo(data_type).max
and np.all(np.mod(data, 1) == 0)
):
data = data.astype(data_type)
return data
def ts_weighted_average(ts_dict):
"""
Program to take in multiple different time-series and average them
using the weights provided. This assumes that the variables passed in
all have the same units. Please see example gallery for an example.
NOTE: All weights should add up to 1
Parameters
----------
ts_dict : dict
Dictionary containing datastream, variable, weight, and datasets
.. code-block:: python
t_dict = {
"sgpvdisC1.b1": {
"variable": "rain_rate",
"weight": 0.05,
"ds": ds,
},
"sgpmetE13.b1": {
"variable": [
"tbrg_precip_total",
"org_precip_rate_mean",
"pwd_precip_rate_mean_1min",
],
"weight": [0.25, 0.05, 0.0125],
},
}
Returns
-------
data : numpy array
Variable of time-series averaged data
"""
# Run through each datastream/variable and get data
da_array = []
data = 0.0
for d in ts_dict:
for i, v in enumerate(ts_dict[d]['variable']):
new_name = '_'.join([d, v])
# Since many variables may have same name, rename with datastream
da = ts_dict[d]['ds'][v].rename(new_name)
# Apply Weights to Data
da.values = da.values * ts_dict[d]['weight'][i]
da_array.append(da)
da = xr.merge(da_array)
# Stack all the data into a 2D time series
data = None
for i, d in enumerate(da):
if i == 0:
data = da[d].values
else:
data = np.vstack((data, da[d].values))
# Sum data across each time sample
data = np.nansum(data, 0)
# Add data to data array and return
dims = ts_dict[list(ts_dict.keys())[0]]['ds'].dims
da_xr = xr.DataArray(
data,
dims=dims,
coords={'time': ts_dict[list(ts_dict.keys())[0]]['ds']['time']},
)
da_xr.attrs['long_name'] = 'Weighted average of ' + ', '.join(list(ts_dict.keys()))
return da_xr
def accumulate_precip(ds, variable, time_delta=None):
"""
Program to accumulate rain rates from an act xarray dataset and insert
variable back into an act xarray dataset with "_accumulated" appended to
the variable name. Please verify that your units are accurately described
in the data.
Parameters
----------
ds : xarray.DataSet
ACT Xarray dataset.
variable : string
Variable name.
time_delta : float
Time delta to calculate precip accumulations over.
Useful if full time series is not passed in.
Returns
-------
ds : xarray.DataSet
ACT Xarray dataset with variable_accumulated.
"""
# Get data, time, and metadata
data = ds[variable]
time = ds.coords['time']
units = ds[variable].attrs['units']
# Calculate mode of the time samples (i.e., 1 min vs 1 sec)
if time_delta is None:
diff = np.diff(time.values, 1) / np.timedelta64(1, 's')
try:
t_delta = stats.mode(diff, keepdims=False).mode
except TypeError:
t_delta = stats.mode(diff).mode
else:
t_delta = time_delta
# Calculate the accumulation based on the units
t_factor = t_delta / 60.0
if units == 'mm/hr':
data = data * (t_factor / 60.0)
accum = np.nancumsum(data.values)
# Add accumulated variable back to the dataset
long_name = 'Accumulated precipitation'
attrs = {'long_name': long_name, 'units': 'mm'}
ds['_'.join([variable, 'accumulated'])] = xr.DataArray(
accum, coords=ds[variable].coords, attrs=attrs
)
return ds
def create_pyart_obj(
ds,
variables=None,
sweep=None,
azimuth=None,
elevation=None,
range_var=None,
sweep_start=None,
sweep_end=None,
lat=None,
lon=None,
alt=None,
sweep_mode='ppi',
sweep_az_thresh=10.0,
sweep_el_thresh=0.5,
):
"""
Produces a Py-ART radar object based on data in the ACT Xarray dataset.
Parameters
----------
ds : xarray.DataSet
ACT Xarray dataset.
variables : list
List of variables to add to the radar object, will default to all
variables.
sweep : string
Name of variable that has sweep information. If none, will try and
calculate from the azimuth and elevation.
azimuth : string
Name of azimuth variable. Will try and find one if none given.
elevation : string
Name of elevation variable. Will try and find one if none given.
range_var : string
Name of the range variable. Will try and find one if none given.
sweep_start : string
Name of variable with sweep start indices.
sweep_end : string
Name of variable with sweep end indices.
lat : string
Name of latitude variable. Will try and find one if none given.
lon : string
Name of longitude variable. Will try and find one if none given.
alt : string
Name of altitude variable. Will try and find one if none given.
sweep_mode : string
Type of scan. Defaults to PPI.
sweep_az_thresh : float
If calculating sweep numbers, the maximum change in azimuth before new
sweep.
sweep_el_thresh : float
If calculating sweep numbers, the maximum change in elevation before
new sweep.
Returns
-------
radar : radar.Radar
Py-ART Radar Object.
"""
if not PYART_AVAILABLE:
raise ImportError(
'Py-ART needs to be installed on your system to convert to ' 'Py-ART Object.'
)
else:
import pyart
# Get list of variables if none provided
if variables is None:
variables = list(ds.keys())
# Determine the sweeps if not already in a variable
if sweep is None:
swp = np.zeros(ds.sizes['time'])
for key in ds.variables.keys():
if len(ds.variables[key].shape) == 2:
total_rays = ds.variables[key].shape[0]
break
nsweeps = int(total_rays / ds.variables['time'].shape[0])
else:
swp = ds[sweep].values
nsweeps = ds[sweep].values
# Get coordinate variables
if lat is None:
lat = [s for s in variables if 'latitude' in s]
if len(lat) == 0:
lat = [s for s in variables if 'lat' in s]
if len(lat) == 0:
raise ValueError(
'Latitude variable not set and could not be ' 'discerned from the data.'
)
else:
lat = lat[0]
if lon is None:
lon = [s for s in variables if 'longitude' in s]
if len(lon) == 0:
lon = [s for s in variables if 'lon' in s]
if len(lon) == 0:
raise ValueError(
'Longitude variable not set and could not be ' 'discerned from the data.'
)
else:
lon = lon[0]
if alt is None:
alt = [s for s in variables if 'altitude' in s]
if len(alt) == 0:
alt = [s for s in variables if 'alt' in s]
if len(alt) == 0:
raise ValueError(
'Altitude variable not set and could not be ' 'discerned from the data.'
)
else:
alt = alt[0]
# Get additional variable names if none provided
if azimuth is None:
azimuth = [s for s in sorted(variables) if 'azimuth' in s][0]
if len(azimuth) == 0:
raise ValueError(
'Azimuth variable not set and could not be ' 'discerned from the data.'
)
if elevation is None:
elevation = [s for s in sorted(variables) if 'elevation' in s][0]
if len(elevation) == 0:
raise ValueError(
'Elevation variable not set and could not be ' 'discerned from the data.'
)
if range_var is None:
range_var = [s for s in sorted(variables) if 'range' in s][0]
if len(range_var) == 0:
raise ValueError('Range variable not set and could not be ' 'discerned from the data.')
# Calculate the sweep indices if not passed in
if sweep_start is None and sweep_end is None:
az_diff = np.abs(np.diff(ds[azimuth].values))
az_idx = az_diff > sweep_az_thresh
el_diff = np.abs(np.diff(ds[elevation].values))
el_idx = el_diff > sweep_el_thresh
# Create index list
az_index = list(np.where(az_idx)[0] + 1)
el_index = list(np.where(el_idx)[0] + 1)
index = sorted(az_index + el_index)
index.insert(0, 0)
index += [ds.sizes['time']]
sweep_start_index = []
sweep_end_index = []
for i in range(len(index) - 1):
sweep_start_index.append(index[i])
sweep_end_index.append(index[i + 1] - 1)
swp[index[i] : index[i + 1]] = i
else:
sweep_start_index = ds[sweep_start].values
sweep_end_index = ds[sweep_end].values
if sweep is None:
for i in range(len(sweep_start_index)):
swp[sweep_start_index[i] : sweep_end_index[i]] = i
radar = pyart.testing.make_empty_ppi_radar(ds.sizes[range_var], ds.sizes['time'], nsweeps)
radar.time['data'] = np.array(ds['time'].values)
# Add lat, lon, and alt
radar.latitude['data'] = np.array(ds[lat].values)
radar.longitude['data'] = np.array(ds[lon].values)
radar.altitude['data'] = np.array(ds[alt].values)
# Add sweep information
radar.sweep_number['data'] = swp
radar.sweep_start_ray_index['data'] = sweep_start_index
radar.sweep_end_ray_index['data'] = sweep_end_index
radar.sweep_mode['data'] = np.array(sweep_mode)
radar.scan_type = sweep_mode
# Add elevation, azimuth, etc...
radar.azimuth['data'] = np.array(ds[azimuth])
radar.elevation['data'] = np.array(ds[elevation])
radar.fixed_angle['data'] = np.array(ds[elevation].values[0])
radar.range['data'] = np.array(ds[range_var].values)
# Calculate radar points in lat/lon
radar.init_gate_altitude()
radar.init_gate_longitude_latitude()
# Add the fields to the radar object
fields = {}
for v in variables:
ref_dict = pyart.config.get_metadata(v)
ref_dict['data'] = np.array(ds[v].values)
fields[v] = ref_dict
radar.fields = fields
return radar
def convert_to_potential_temp(
ds=None,
temp_var_name=None,
press_var_name=None,
temperature=None,
pressure=None,
temp_var_units=None,
press_var_units=None,
):
"""
Converts temperature to potential temperature.
Parameters
----------
ds : xarray.DataSet
ACT Xarray dataset
temp_var_name : str
Temperature variable name in the ACT Xarray dataset containing
temperature data to convert.
press_var_name : str
Pressure variable name in the ACT Xarray dataset containing the
pressure data to use in conversion. If not set or set to None will
use values from pressure keyword.
pressure : int, float, numpy array
Optional pressure values to use instead of using values from xarray
dataset. If set must also set press_var_units keyword.
temp_var_units : string
Pint recognized units string for temperature data. If set to None will
use the units attribute under temperature variable in ds.
press_var_units : string
Pint recognized units string for pressure data. If set to None will
use the units attribute under pressure variable in the dataset. If using
the pressure keyword this must be set.
Returns
-------
potential_temperature : None, int, float, numpy array
The converted temperature to potential temperature or None if something
goes wrong.
References
----------
May, R. M., Arms, S. C., Marsh, P., Bruning, E., Leeman, J. R., Goebbert,
K., Thielen, J. E., and Bruick, Z., 2021: MetPy: A Python Package for
Meteorological Data. Unidata, https://github.com/Unidata/MetPy,
doi:10.5065/D6WW7G29.
"""
potential_temp = None
if temp_var_units is None and temp_var_name is not None:
temp_var_units = ds[temp_var_name].attrs['units']
if press_var_units is None and press_var_name is not None:
press_var_units = ds[press_var_name].attrs['units']
if press_var_units is None:
raise ValueError(
"Need to provide 'press_var_units' keyword " "when using 'pressure' keyword"
)
if temp_var_units is None:
raise ValueError(
"Need to provide 'temp_var_units' keyword " "when using 'temperature' keyword"
)
if temperature is not None:
temperature = metpy.units.units.Quantity(temperature, temp_var_units)
else:
temperature = metpy.units.units.Quantity(ds[temp_var_name].values, temp_var_units)
if pressure is not None:
pressure = metpy.units.units.Quantity(pressure, press_var_units)
else:
pressure = metpy.units.units.Quantity(ds[press_var_name].values, press_var_units)
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=RuntimeWarning)
potential_temp = metpy.calc.potential_temperature(pressure, temperature)
potential_temp = potential_temp.to(temp_var_units).magnitude
return potential_temp
def height_adjusted_temperature(
ds=None,
temp_var_name=None,
height_difference=0,
height_units='m',
press_var_name=None,
temperature=None,
temp_var_units=None,
pressure=101.325,
press_var_units='kPa',
):
"""
Converts temperature for change in height.
Parameters
----------
ds : xarray.DataSet, None
Optional Xarray dataset for retrieving pressure and temperature values.
Not needed if using temperature keyword.
temp_var_name : str, None
Optional temperature variable name in the Xarray dataset containing the
temperature data to use in conversion. If not set or set to None will
use values from temperature keyword.
height_difference : int, float
Required difference in height to adjust pressure values. Positive
values increase height, negative values decrease height.
height_units : str
Units of height value.
press_var_name : str, None
Optional pressure variable name in the Xarray dataset containing the
pressure data to use in conversion. If not set or set to None will
use values from pressure keyword.
temperature : int, float, numpy array, None
Optional temperature values to use instead of values in the dataset.
temp_var_units : str, None
Pint recognized units string for temperature data. If set to None will
use the units attribute under temperature variable in the dataset.
If using the temperature keyword this must be set.
pressure : int, float, numpy array, None
Optional pressure values to use instead of values in the dataset.
Default value of sea level pressure is set for ease of use.
press_var_units : str, None
Pint recognized units string for pressure data. If set to None will
use the units attribute under pressure variable in the dataset.
If using the pressure keyword this must be set. Default value of
sea level pressure is set for ease of use.
Returns
-------
adjusted_temperature : None, int, float, numpy array
The height adjusted temperature or None if something goes wrong.
References
----------
May, R. M., Arms, S. C., Marsh, P., Bruning, E., Leeman, J. R., Goebbert,
K., Thielen, J. E., and Bruick, Z., 2021: MetPy: A Python Package for
Meteorological Data. Unidata, https://github.com/Unidata/MetPy,
doi:10.5065/D6WW7G29.
"""
adjusted_temperature = None
if temp_var_units is None and temperature is None:
temp_var_units = ds[temp_var_name].attrs['units']
if temp_var_units is None:
raise ValueError(
"Need to provide 'temp_var_units' keyword when " 'providing temperature keyword values.'
)
if temperature is not None:
temperature = metpy.units.units.Quantity(temperature, temp_var_units)
else:
temperature = metpy.units.units.Quantity(ds[temp_var_name].values, temp_var_units)
if press_var_name is not None:
pressure = metpy.units.units.Quantity(ds[press_var_name].values, press_var_units)
else:
pressure = metpy.units.units.Quantity(pressure, press_var_units)
adjusted_pressure = height_adjusted_pressure(
height_difference=height_difference,
height_units=height_units,
pressure=pressure.magnitude,
press_var_units=press_var_units,
)
adjusted_pressure = metpy.units.units.Quantity(adjusted_pressure, press_var_units)
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=RuntimeWarning)
adjusted_temperature = metpy.calc.dry_lapse(adjusted_pressure, temperature, pressure)
adjusted_temperature = adjusted_temperature.to(temp_var_units).magnitude
return adjusted_temperature
def height_adjusted_pressure(
ds=None,
press_var_name=None,
height_difference=0,
height_units='m',
pressure=None,
press_var_units=None,
):
"""
Converts pressure for change in height.
Parameters
----------
ds : xarray.DataSet, None
Optional Xarray dataset for retrieving pressure values. Not needed if
using pressure keyword.
press_var_name : str, None
Optional pressure variable name in the Xarray dataset containing the
pressure data to use in conversion. If not set or set to None will
use values from pressure keyword.
height_difference : int, float
Required difference in height to adjust pressure values. Positive
values increase height, negative values decrease height.
height_units : str
Units of height value.
pressure : int, float, numpy array, None
Optional pressure values to use instead of values in the dataset.
press_var_units : str, None
Pint recognized units string for pressure data. If set to None will
use the units attribute under pressure variable in the dataset.
If using the pressure keyword this must be set.
Returns
-------
adjusted_pressure : None, int, float, numpy array
The height adjusted pressure or None if something goes wrong.
References
----------
May, R. M., Arms, S. C., Marsh, P., Bruning, E., Leeman, J. R., Goebbert,
K., Thielen, J. E., and Bruick, Z., 2021: MetPy: A Python Package for
Meteorological Data. Unidata, https://github.com/Unidata/MetPy,
doi:10.5065/D6WW7G29.
"""
adjusted_pressure = None
if press_var_units is None and pressure is None:
press_var_units = ds[press_var_name].attrs['units']
if press_var_units is None:
raise ValueError(
"Need to provide 'press_var_units' keyword when " 'providing pressure keyword values.'
)
if pressure is not None:
pressure = metpy.units.units.Quantity(pressure, press_var_units)
else:
pressure = metpy.units.units.Quantity(ds[press_var_name].values, press_var_units)
height_difference = metpy.units.units.Quantity(height_difference, height_units)
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=RuntimeWarning)
adjusted_pressure = metpy.calc.add_height_to_pressure(pressure, height_difference)
adjusted_pressure = adjusted_pressure.to(press_var_units).magnitude
return adjusted_pressure
<|code_end|>
examples/utils/plot_parse_filename.py
<|code_start|><|code_end|>
|
act/utils/data_utils.py
<|code_start|>"""
Module containing utilities for the data.
"""
import importlib
import warnings
import metpy
import numpy as np
import pint
import scipy.stats as stats
import xarray as xr
from pathlib import Path
import re
spec = importlib.util.find_spec('pyart')
if spec is not None:
PYART_AVAILABLE = True
else:
PYART_AVAILABLE = False
@xr.register_dataset_accessor('utils')
class ChangeUnits:
"""
Class for updating units in the dataset. Data values and units attribute
are updated in place. Coordinate variables cannot be updated in place; the
returned dataset must be used when updating coordinate variables.
"""
def __init__(self, ds):
self._ds = ds
def change_units(
self, variables=None, desired_unit=None, skip_variables=None, skip_standard=True
):
"""
Parameters
----------
variables : None, str or list of str
Variable names to attempt to change units.
desired_unit : str
Desired udunits unit string.
skip_variables : None, str or list of str
Variable names to skip. Works well when not providing a variables
keyword.
skip_standard : boolean
Flag indicating the QC variables that will not need changing are
skipped. Makes the processing faster when processing all variables
in dataset.
Returns
-------
dataset : xarray.dataset
A new dataset if the coordinate variables are updated. The returned
dataset must be used if coordinate variables are updated,
otherwise the dataset is updated in place.
"""
if variables is not None and isinstance(variables, str):
variables = [variables]
if skip_variables is not None and isinstance(skip_variables, str):
skip_variables = [skip_variables]
if desired_unit is None:
raise ValueError("Need to provide 'desired_unit' keyword for .change_units() method")
if variables is None:
variables = list(self._ds.data_vars)
if skip_variables is not None:
variables = list(set(variables) - set(skip_variables))
for var_name in variables:
try:
if self._ds[var_name].attrs['standard_name'] == 'quality_flag':
continue
except KeyError:
pass
try:
data = convert_units(
self._ds[var_name].values,
self._ds[var_name].attrs['units'],
desired_unit,
)
try:
self._ds[var_name].values = data
self._ds[var_name].attrs['units'] = desired_unit
except ValueError:
attrs = self._ds[var_name].attrs
self._ds = self._ds.assign_coords({var_name: data})
attrs['units'] = desired_unit
self._ds[var_name].attrs = attrs
except (
KeyError,
pint.errors.DimensionalityError,
pint.errors.UndefinedUnitError,
np.core._exceptions.UFuncTypeError,
):
continue
return self._ds
# @xr.register_dataset_accessor('utils')
class DatastreamParserARM(object):
'''
Class to parse ARM datastream names or filenames into its components.
Will return None for each attribute if not extracted from the filename.
Attributes
----------
site : str or None
The site code extracted from the filename.
datastream_class : str
The datastream class extracted from the filename.
facility : str or None
The datastream facility code extracted from the filename.
level : str or None
The datastream level code extracted from the filename.
datastream : str or None
The datastream extracted from the filename.
date : str or None
The date extracted from the filename.
time : str or None
The time extracted from the filename.
ext : str or None
The file extension extracted from the filename.
Example
-------
>>> from act.utils.data_utils import DatastreamParserARM
>>> file = 'sgpmetE13.b1.20190501.024254.nc'
>>> fn_obj = DatastreamParserARM(file)
>>> fn_obj.site
'sgp'
>>> fn_obj.datastream_class
'met'
'''
def __init__(self, ds=''):
'''
Constructor that initializes datastream data member and runs
parse_datastream class method. Also converts datastream name to
lower case before parsing.
ds : str
The datastream or filename to parse
'''
if isinstance(ds, str):
self.__datastream = Path(ds).name
else:
raise ValueError('Datastream or filename name must be a string')
try:
self.__parse_datastream()
except ValueError:
self.__site = None
self.__class = None
self.__facility = None
self.__datastream = None
self.__level = None
self.__date = None
self.__time = None
self.__ext = None
def __parse_datastream(self):
'''
Private method to parse datastream name into its various components
(site, class, facility, and data level. Is called automatically by
constructor when object of class is instantiated and when the
set_datastream method is called to reset the object.
'''
# Import the built-in match function from regular expression library
# self.__datastream = self.__datastream
tempstring = self.__datastream.split('.')
# Check to see if ARM-standard filename was passed
self.__ext = None
self.__time = None
self.__date = None
self.__level = None
self.__site = None
self.__class = None
self.__facility = None
if len(tempstring) >= 5:
self.__ext = tempstring[4]
if len(tempstring) >= 4:
self.__time = tempstring[3]
if len(tempstring) >= 3:
self.__date = tempstring[2]
if len(tempstring) >= 2:
m = re.match('[abcs0][0123456789]', tempstring[1])
if m is not None:
self.__level = m.group()
match = False
m = re.search(r'(^[a-z]{3})(\w+)([A-Z]{1}\d{1,2})$', tempstring[0])
if m is not None:
self.__site = m.group(1)
self.__class = m.group(2)
self.__facility = m.group(3)
match = True
if not match:
m = re.search(r'(^[a-z]{3})(\w+)$', tempstring[0])
if m is not None:
self.__site = m.group(1)
self.__class = m.group(2)
match = True
if not match and len(tempstring[0]) == 3:
self.__site = tempstring[0]
match = True
if not match:
raise ValueError(self.__datastream)
def set_datastream(self, ds):
'''
Method used to set or reset object by passing a new datastream name.
'''
self.__init__(ds)
@property
def datastream(self):
'''
Property returning current datastream name stored in object in
standard lower case. Will return the datastream with no level if
unavailable.
'''
try:
return ''.join((self.__site, self.__class, self.__facility, '.',
self.__level))
except TypeError:
return None
@property
def site(self):
'''
Property returning current site name stored in object in standard
lower case.
'''
return self.__site
@property
def datastream_class(self):
'''
Property returning current datastream class name stored in object in
standard lower case. Could not use class as attribute name since it
is a reserved word in Python
'''
return self.__class
@property
def facility(self):
'''
Property returning current facility name stored in object in
standard upper case.
'''
try:
return self.__facility.upper()
except AttributeError:
return self.__facility
@property
def level(self):
'''
Property returning current data level stored in object in standard
lower case.
'''
return self.__level
@property
def datastream_standard(self):
'''
Property returning datastream name in ARM-standard format with
facility in caps. Will return the datastream name with no level if
unavailable.
'''
try:
return ''.join((self.site, self.datastream_class, self.facility,
'.', self.level))
except TypeError:
return None
@property
def date(self):
'''
Property returning date from filename.
'''
return self.__date
@property
def time(self):
'''
Property returning time from filename.
'''
return self.__time
@property
def ext(self):
'''
Property returning file extension from filename.
'''
return self.__ext
def assign_coordinates(ds, coord_list):
"""
This procedure will create a new ACT dataset whose coordinates are
designated to be the variables in a given list. This helps make data
slicing via xarray and visualization easier.
Parameters
----------
ds : ACT Dataset
The ACT Dataset to modify the coordinates of.
coord_list : dict
The list of variables to assign as coordinates, given as a dictionary
whose keys are the variable name and values are the dimension name.
Returns
-------
new_ds : ACT Dataset
The new ACT Dataset with the coordinates assigned to be the given
variables.
"""
# Check to make sure that user assigned valid entries for coordinates
for coord in coord_list.keys():
if coord not in ds.variables.keys():
raise KeyError(coord + ' is not a variable in the Dataset.')
if ds.dims[coord_list[coord]] != len(ds.variables[coord]):
raise IndexError(
coord + ' must have the same ' + 'value as length of ' + coord_list[coord]
)
new_ds_dict = {}
for variable in ds.variables.keys():
my_coord_dict = {}
dataarray = ds[variable]
if len(dataarray.dims) > 0:
for coord in coord_list.keys():
if coord_list[coord] in dataarray.dims:
my_coord_dict[coord_list[coord]] = ds[coord]
if variable not in my_coord_dict.keys() and variable not in ds.dims:
the_dataarray = xr.DataArray(dataarray.data, coords=my_coord_dict, dims=dataarray.dims)
new_ds_dict[variable] = the_dataarray
new_ds = xr.Dataset(new_ds_dict, coords=my_coord_dict)
return new_ds
def add_in_nan(time, data):
"""
This procedure adds in NaNs when there is a larger than expected time step.
This is useful for timeseries where there is a gap in data and need a
NaN value to stop plotting from connecting data over the large data gap.
Parameters
----------
time : 1D array of numpy datetime64 or Xarray DataArray of datetime64
Times in the timeseries.
data : 1D or 2D numpy array or Xarray DataArray
Array containing the data. The 0 axis corresponds to time.
Returns
-------
time : numpy array or Xarray DataArray
The array containing the new times including a NaN filled
sample or slice if multi-dimensional.
The intervals are determined by the mode of the timestep in *time*.
data : numpy array or Xarray DataArray
The array containing the NaN-inserted data.
"""
time_is_DataArray = False
data_is_DataArray = False
if isinstance(time, xr.core.dataarray.DataArray):
time_is_DataArray = True
time_attributes = time.attrs
time_dims = time.dims
if isinstance(data, xr.core.dataarray.DataArray):
data_is_DataArray = True
data_attributes = data.attrs
data_dims = data.dims
# Return if time dimension is only size one since we can't do differences.
if time.size > 2:
data = np.asarray(data)
time = np.asarray(time)
# Not sure if we need to set to second data type to make it work better.
# Leaving code in here in case we need to update.
# diff = np.diff(time.astype('datetime64[s]'), 1)
diff = np.diff(time, 1)
# Wrapping in a try to catch error while switching between numpy 1.10 to 1.11
try:
mode = stats.mode(diff, keepdims=True).mode[0]
except TypeError:
mode = stats.mode(diff).mode[0]
index = np.where(diff > (2.0 * mode))
offset = 0
for i in index[0]:
corr_i = i + offset
time_added = time[corr_i] + (time[corr_i + 1] - time[corr_i]) / 2.0
time = np.insert(time, corr_i + 1, time_added)
data = np.insert(data, corr_i + 1, np.nan, axis=0)
offset += 1
if time_is_DataArray:
time = xr.DataArray(time, attrs=time_attributes, dims=time_dims)
if data_is_DataArray:
data = xr.DataArray(data, attrs=data_attributes, dims=data_dims)
return time, data
def get_missing_value(
ds,
variable,
default=-9999,
add_if_missing_in_ds=False,
use_FillValue=False,
nodefault=False,
):
"""
Function to get missing value from missing_value or _FillValue attribute.
Works well with catching errors and allows for a default value when a
missing value is not listed in the dataset. You may get strange results
because xarray will automatically convert all missing_value or
_FillValue to NaN and then remove the missing_value and
_FillValue variable attribute when reading data with default settings.
Parameters
----------
ds : xarray.Dataset
Xarray dataset containing data variable.
variable : str
Variable name to use for getting missing value.
default : int or float
Default value to use if missing value attribute is not in dataset.
add_if_missing_in_ds : bool
Boolean to add to the dataset if does not exist. Default is False.
use_FillValue : bool
Boolean to use _FillValue instead of missing_value. If missing_value
does exist and _FillValue does not will add _FillValue
set to missing_value value.
nodefault : bool
Option to check if the variable has a missing value set when you
do not want the default returned. If the missing value is found
it will be returned, else None will be returned.
Returns
-------
missing : scalar int or float (or None)
Value used to indicate missing value matching type of data or None if
nodefault keyword set to True.
Examples
--------
.. code-block:: python
from act.utils import get_missing_value
missing = get_missing_value(dq_ds, "temp_mean")
print(missing)
-9999.0
"""
in_ds = False
if use_FillValue:
missing_atts = ['_FillValue', 'missing_value']
else:
missing_atts = ['missing_value', '_FillValue']
for att in missing_atts:
try:
missing = ds[variable].attrs[att]
in_ds = True
break
except (AttributeError, KeyError):
missing = default
# Check if a default value should not be returned and a value
# was not found.
if nodefault is True and in_ds is False:
missing = None
return missing
# Check data type and try to match missing_value to the data type of data
try:
missing = ds[variable].data.dtype.type(missing)
except KeyError:
pass
except AttributeError:
print(
('--- AttributeError: Issue trying to get data type ' + 'from "{}" data ---').format(
variable
)
)
# If requested add missing value to the dataset
if add_if_missing_in_ds and not in_ds:
try:
ds[variable].attrs[missing_atts[0]] = missing
except KeyError:
print(
('--- KeyError: Issue trying to add "{}" ' + 'attribute to "{}" ---').format(
missing_atts[0], variable
)
)
return missing
def convert_units(data, in_units, out_units):
"""
Wrapper function around library to convert data using unit strings.
Currently using pint units library. Will attempt to preserve numpy
data type, but will upconvert to numpy float64 if need to change
data type for converted values.
Parameters
----------
data : list, tuple or numpy array
Data array to be modified.
in_units : str
Units scalar string of input data array.
out_units : str
Units scalar string of desired output data array.
Returns
-------
data : numpy array
Data array converted into new units.
Examples
--------
> data = np.array([1,2,3,4,5,6])
> data = convert_units(data, 'cm', 'm')
> data
array([0.01, 0.02, 0.03, 0.04, 0.05, 0.06])
"""
# Fix historical and current incorrect usage of units.
convert_dict = {
'C': 'degC',
'F': 'degF',
'%': 'percent', # seems like pint does not like this symbol?
'1': 'unitless', # seems like pint does not like this number?
}
if in_units in convert_dict:
in_units = convert_dict[in_units]
if out_units in convert_dict:
out_units = convert_dict[out_units]
if in_units == out_units:
return data
# Instantiate the registry
ureg = pint.UnitRegistry(autoconvert_offset_to_baseunit=True)
# Add missing units
ureg.define('percent = 0.01*count = %')
ureg.define('unitless = count = 1')
if not isinstance(data, np.ndarray):
data = np.array(data)
data_type = data.dtype
data_type_kind = data.dtype.kind
# Do the conversion magic
data = (data * ureg(in_units)).to(out_units)
data = data.magnitude
# The data type may be changed by pint. This is a side effect
# of pint changing the datatype to float. Check if the converted values
# need float precision. If so leave, if not change back to original
# precision after checking that precision is not lost with the original
# data type.
if (
data_type_kind == 'i'
and np.nanmin(data) >= np.iinfo(data_type).min
and np.nanmax(data) <= np.iinfo(data_type).max
and np.all(np.mod(data, 1) == 0)
):
data = data.astype(data_type)
return data
def ts_weighted_average(ts_dict):
"""
Program to take in multiple difference time-series and average them
using the weights provided. This assumes that the variables passed in
all have the same units. Please see example gallery for an example.
NOTE: All weights should add up to 1
Parameters
----------
ts_dict : dict
Dictionary containing datastream, variable, weight, and datasets
.. code-block:: python
t_dict = {
"sgpvdisC1.b1": {
"variable": "rain_rate",
"weight": 0.05,
"ds": ds,
},
"sgpmetE13.b1": {
"variable": [
"tbrg_precip_total",
"org_precip_rate_mean",
"pwd_precip_rate_mean_1min",
],
"weight": [0.25, 0.05, 0.0125],
},
}
Returns
-------
data : numpy array
Variable of time-series averaged data
"""
# Run through each datastream/variable and get data
da_array = []
data = 0.0
for d in ts_dict:
for i, v in enumerate(ts_dict[d]['variable']):
new_name = '_'.join([d, v])
# Since many variables may have same name, rename with datastream
da = ts_dict[d]['ds'][v].rename(new_name)
# Apply Weights to Data
da.values = da.values * ts_dict[d]['weight'][i]
da_array.append(da)
da = xr.merge(da_array)
# Stack all the data into a 2D time series
data = None
for i, d in enumerate(da):
if i == 0:
data = da[d].values
else:
data = np.vstack((data, da[d].values))
# Sum data across each time sample
data = np.nansum(data, 0)
# Add data to data array and return
dims = ts_dict[list(ts_dict.keys())[0]]['ds'].dims
da_xr = xr.DataArray(
data,
dims=dims,
coords={'time': ts_dict[list(ts_dict.keys())[0]]['ds']['time']},
)
da_xr.attrs['long_name'] = 'Weighted average of ' + ', '.join(list(ts_dict.keys()))
return da_xr
def accumulate_precip(ds, variable, time_delta=None):
"""
Program to accumulate rain rates from an act xarray dataset and insert
variable back into an act xarray dataset with "_accumulated" appended to
the variable name. Please verify that your units are accurately described
in the data.
Parameters
----------
ds : xarray.DataSet
ACT Xarray dataset.
variable : string
Variable name.
time_delta : float
Time delta to calculate precip accumulations over.
Useful if full time series is not passed in.
Returns
-------
ds : xarray.DataSet
ACT Xarray dataset with variable_accumulated.
"""
# Get data, time, and metadata
data = ds[variable]
time = ds.coords['time']
units = ds[variable].attrs['units']
# Calculate mode of the time samples(i.e. 1 min vs 1 sec)
if time_delta is None:
diff = np.diff(time.values, 1) / np.timedelta64(1, 's')
try:
t_delta = stats.mode(diff, keepdims=False).mode
except TypeError:
t_delta = stats.mode(diff).mode
else:
t_delta = time_delta
# Calculate the accumulation based on the units
t_factor = t_delta / 60.0
if units == 'mm/hr':
data = data * (t_factor / 60.0)
accum = np.nancumsum(data.values)
# Add accumulated variable back to the dataset
long_name = 'Accumulated precipitation'
attrs = {'long_name': long_name, 'units': 'mm'}
ds['_'.join([variable, 'accumulated'])] = xr.DataArray(
accum, coords=ds[variable].coords, attrs=attrs
)
return ds
def create_pyart_obj(
ds,
variables=None,
sweep=None,
azimuth=None,
elevation=None,
range_var=None,
sweep_start=None,
sweep_end=None,
lat=None,
lon=None,
alt=None,
sweep_mode='ppi',
sweep_az_thresh=10.0,
sweep_el_thresh=0.5,
):
"""
Produces a Py-ART radar object based on data in the ACT Xarray dataset.
Parameters
----------
ds : xarray.DataSet
ACT Xarray dataset.
variables : list
List of variables to add to the radar object, will default to all
variables.
sweep : string
Name of variable that has sweep information. If none, will try and
calculate from the azimuth and elevation.
azimuth : string
Name of azimuth variable. Will try and find one if none given.
elevation : string
Name of elevation variable. Will try and find one if none given.
range_var : string
Name of the range variable. Will try and find one if none given.
sweep_start : string
Name of variable with sweep start indices.
sweep_end : string
Name of variable with sweep end indices.
lat : string
Name of latitude variable. Will try and find one if none given.
lon : string
Name of longitude variable. Will try and find one if none given.
alt : string
Name of altitude variable. Will try and find one if none given.
sweep_mode : string
Type of scan. Defaults to PPI.
sweep_az_thresh : float
If calculating sweep numbers, the maximum change in azimuth before new
sweep.
sweep_el_thresh : float
If calculating sweep numbers, the maximum change in elevation before
new sweep.
Returns
-------
radar : radar.Radar
Py-ART Radar Object.
"""
if not PYART_AVAILABLE:
raise ImportError(
'Py-ART needs to be installed on your system to convert to ' 'Py-ART Object.'
)
else:
import pyart
# Get list of variables if none provided
if variables is None:
variables = list(ds.keys())
# Determine the sweeps if not already in a variable
if sweep is None:
swp = np.zeros(ds.sizes['time'])
for key in ds.variables.keys():
if len(ds.variables[key].shape) == 2:
total_rays = ds.variables[key].shape[0]
break
nsweeps = int(total_rays / ds.variables['time'].shape[0])
else:
swp = ds[sweep].values
nsweeps = ds[sweep].values
# Get coordinate variables
if lat is None:
lat = [s for s in variables if 'latitude' in s]
if len(lat) == 0:
lat = [s for s in variables if 'lat' in s]
if len(lat) == 0:
raise ValueError(
'Latitude variable not set and could not be ' 'discerned from the data.'
)
else:
lat = lat[0]
if lon is None:
lon = [s for s in variables if 'longitude' in s]
if len(lon) == 0:
lon = [s for s in variables if 'lon' in s]
if len(lon) == 0:
raise ValueError(
'Longitude variable not set and could not be ' 'discerned from the data.'
)
else:
lon = lon[0]
if alt is None:
alt = [s for s in variables if 'altitude' in s]
if len(alt) == 0:
alt = [s for s in variables if 'alt' in s]
if len(alt) == 0:
raise ValueError(
'Altitude variable not set and could not be ' 'discerned from the data.'
)
else:
alt = alt[0]
# Get additional variable names if none provided
if azimuth is None:
azimuth = [s for s in sorted(variables) if 'azimuth' in s][0]
if len(azimuth) == 0:
raise ValueError(
'Azimuth variable not set and could not be ' 'discerned from the data.'
)
if elevation is None:
elevation = [s for s in sorted(variables) if 'elevation' in s][0]
if len(elevation) == 0:
raise ValueError(
'Elevation variable not set and could not be ' 'discerned from the data.'
)
if range_var is None:
range_var = [s for s in sorted(variables) if 'range' in s][0]
if len(range_var) == 0:
raise ValueError('Range variable not set and could not be ' 'discerned from the data.')
# Calculate the sweep indices if not passed in
if sweep_start is None and sweep_end is None:
az_diff = np.abs(np.diff(ds[azimuth].values))
az_idx = az_diff > sweep_az_thresh
el_diff = np.abs(np.diff(ds[elevation].values))
el_idx = el_diff > sweep_el_thresh
# Create index list
az_index = list(np.where(az_idx)[0] + 1)
el_index = list(np.where(el_idx)[0] + 1)
index = sorted(az_index + el_index)
index.insert(0, 0)
index += [ds.sizes['time']]
sweep_start_index = []
sweep_end_index = []
for i in range(len(index) - 1):
sweep_start_index.append(index[i])
sweep_end_index.append(index[i + 1] - 1)
swp[index[i] : index[i + 1]] = i
else:
sweep_start_index = ds[sweep_start].values
sweep_end_index = ds[sweep_end].values
if sweep is None:
for i in range(len(sweep_start_index)):
swp[sweep_start_index[i] : sweep_end_index[i]] = i
radar = pyart.testing.make_empty_ppi_radar(ds.sizes[range_var], ds.sizes['time'], nsweeps)
radar.time['data'] = np.array(ds['time'].values)
# Add lat, lon, and alt
radar.latitude['data'] = np.array(ds[lat].values)
radar.longitude['data'] = np.array(ds[lon].values)
radar.altitude['data'] = np.array(ds[alt].values)
# Add sweep information
radar.sweep_number['data'] = swp
radar.sweep_start_ray_index['data'] = sweep_start_index
radar.sweep_end_ray_index['data'] = sweep_end_index
radar.sweep_mode['data'] = np.array(sweep_mode)
radar.scan_type = sweep_mode
# Add elevation, azimuth, etc...
radar.azimuth['data'] = np.array(ds[azimuth])
radar.elevation['data'] = np.array(ds[elevation])
radar.fixed_angle['data'] = np.array(ds[elevation].values[0])
radar.range['data'] = np.array(ds[range_var].values)
# Calculate radar points in lat/lon
radar.init_gate_altitude()
radar.init_gate_longitude_latitude()
# Add the fields to the radar object
fields = {}
for v in variables:
ref_dict = pyart.config.get_metadata(v)
ref_dict['data'] = np.array(ds[v].values)
fields[v] = ref_dict
radar.fields = fields
return radar
def convert_to_potential_temp(
ds=None,
temp_var_name=None,
press_var_name=None,
temperature=None,
pressure=None,
temp_var_units=None,
press_var_units=None,
):
"""
Converts temperature to potential temperature.
Parameters
----------
ds : xarray.DataSet
ACT Xarray dataset
temp_var_name : str
Temperature variable name in the ACT Xarray dataset containing
temperature data to convert.
press_var_name : str
Pressure variable name in the ACT Xarray dataset containing the
pressure data to use in conversion. If not set or set to None will
use values from pressure keyword.
pressure : int, float, numpy array
Optional pressure values to use instead of using values from xarray
dataset. If set must also set press_var_units keyword.
temp_var_units : string
Pint recognized units string for temperature data. If set to None will
use the units attribute under temperature variable in ds.
press_var_units : string
Pint recognized units string for pressure data. If set to None will
use the units attribute under pressure variable in the dataset. If using
the pressure keyword this must be set.
Returns
-------
potential_temperature : None, int, float, numpy array
The converted temperature to potential temperature or None if something
goes wrong.
References
----------
May, R. M., Arms, S. C., Marsh, P., Bruning, E., Leeman, J. R., Goebbert,
K., Thielen, J. E., and Bruick, Z., 2021: MetPy: A Python Package for
Meteorological Data. Unidata, https://github.com/Unidata/MetPy,
doi:10.5065/D6WW7G29.
"""
potential_temp = None
if temp_var_units is None and temp_var_name is not None:
temp_var_units = ds[temp_var_name].attrs['units']
if press_var_units is None and press_var_name is not None:
press_var_units = ds[press_var_name].attrs['units']
if press_var_units is None:
raise ValueError(
"Need to provide 'press_var_units' keyword " "when using 'pressure' keyword"
)
if temp_var_units is None:
raise ValueError(
"Need to provide 'temp_var_units' keyword " "when using 'temperature' keyword"
)
if temperature is not None:
temperature = metpy.units.units.Quantity(temperature, temp_var_units)
else:
temperature = metpy.units.units.Quantity(ds[temp_var_name].values, temp_var_units)
if pressure is not None:
pressure = metpy.units.units.Quantity(pressure, press_var_units)
else:
pressure = metpy.units.units.Quantity(ds[press_var_name].values, press_var_units)
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=RuntimeWarning)
potential_temp = metpy.calc.potential_temperature(pressure, temperature)
potential_temp = potential_temp.to(temp_var_units).magnitude
return potential_temp
def height_adjusted_temperature(
ds=None,
temp_var_name=None,
height_difference=0,
height_units='m',
press_var_name=None,
temperature=None,
temp_var_units=None,
pressure=101.325,
press_var_units='kPa',
):
"""
Converts temperature for change in height.
Parameters
----------
ds : xarray.DataSet, None
Optional Xarray dataset for retrieving pressure and temperature values.
Not needed if using temperature keyword.
temp_var_name : str, None
Optional temperature variable name in the Xarray dataset containing the
temperature data to use in conversion. If not set or set to None will
use values from temperature keyword.
height_difference : int, float
Required difference in height to adjust pressure values. Positive
values increase height, negative values decrease height.
height_units : str
Units of height value.
press_var_name : str, None
Optional pressure variable name in the Xarray dataset containing the
pressure data to use in conversion. If not set or set to None will
use values from pressure keyword.
temperature : int, float, numpy array, None
Optional temperature values to use instead of values in the dataset.
temp_var_units : str, None
Pint recognized units string for temperature data. If set to None will
use the units attribute under temperature variable in the dataset.
If using the temperature keyword this must be set.
pressure : int, float, numpy array, None
Optional pressure values to use instead of values in the dataset.
Default value of sea level pressure is set for ease of use.
press_var_units : str, None
Pint recognized units string for pressure data. If set to None will
use the units attribute under pressure variable in the dataset.
If using the pressure keyword this must be set. Default value of
sea level pressure is set for ease of use.
Returns
-------
adjusted_temperature : None, int, float, numpy array
The height adjusted temperature or None if something goes wrong.
References
----------
May, R. M., Arms, S. C., Marsh, P., Bruning, E., Leeman, J. R., Goebbert,
K., Thielen, J. E., and Bruick, Z., 2021: MetPy: A Python Package for
Meteorological Data. Unidata, https://github.com/Unidata/MetPy,
doi:10.5065/D6WW7G29.
"""
adjusted_temperature = None
if temp_var_units is None and temperature is None:
temp_var_units = ds[temp_var_name].attrs['units']
if temp_var_units is None:
raise ValueError(
"Need to provide 'temp_var_units' keyword when " 'providing temperature keyword values.'
)
if temperature is not None:
temperature = metpy.units.units.Quantity(temperature, temp_var_units)
else:
temperature = metpy.units.units.Quantity(ds[temp_var_name].values, temp_var_units)
if press_var_name is not None:
pressure = metpy.units.units.Quantity(ds[press_var_name].values, press_var_units)
else:
pressure = metpy.units.units.Quantity(pressure, press_var_units)
adjusted_pressure = height_adjusted_pressure(
height_difference=height_difference,
height_units=height_units,
pressure=pressure.magnitude,
press_var_units=press_var_units,
)
adjusted_pressure = metpy.units.units.Quantity(adjusted_pressure, press_var_units)
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=RuntimeWarning)
adjusted_temperature = metpy.calc.dry_lapse(adjusted_pressure, temperature, pressure)
adjusted_temperature = adjusted_temperature.to(temp_var_units).magnitude
return adjusted_temperature
def height_adjusted_pressure(
ds=None,
press_var_name=None,
height_difference=0,
height_units='m',
pressure=None,
press_var_units=None,
):
"""
Converts pressure for change in height.
Parameters
----------
ds : xarray.DataSet, None
Optional Xarray dataset for retrieving pressure values. Not needed if
using pressure keyword.
press_var_name : str, None
Optional pressure variable name in the Xarray dataset containing the
pressure data to use in conversion. If not set or set to None will
use values from pressure keyword.
height_difference : int, float
Required difference in height to adjust pressure values. Positive
values increase height, negative values decrease height.
height_units : str
Units of height value.
pressure : int, float, numpy array, None
Optional pressure values to use instead of values in the dataset.
press_var_units : str, None
Pint recognized units string for pressure data. If set to None will
use the units attribute under pressure variable in the dataset.
If using the pressure keyword this must be set.
Returns
-------
adjusted_pressure : None, int, float, numpy array
The height adjusted pressure or None if something goes wrong.
References
----------
May, R. M., Arms, S. C., Marsh, P., Bruning, E., Leeman, J. R., Goebbert,
K., Thielen, J. E., and Bruick, Z., 2021: MetPy: A Python Package for
Meteorological Data. Unidata, https://github.com/Unidata/MetPy,
doi:10.5065/D6WW7G29.
"""
adjusted_pressure = None
if press_var_units is None and pressure is None:
press_var_units = ds[press_var_name].attrs['units']
if press_var_units is None:
raise ValueError(
"Need to provide 'press_var_units' keyword when " 'providing pressure keyword values.'
)
if pressure is not None:
pressure = metpy.units.units.Quantity(pressure, press_var_units)
else:
pressure = metpy.units.units.Quantity(ds[press_var_name].values, press_var_units)
height_difference = metpy.units.units.Quantity(height_difference, height_units)
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=RuntimeWarning)
adjusted_pressure = metpy.calc.add_height_to_pressure(pressure, height_difference)
adjusted_pressure = adjusted_pressure.to(press_var_units).magnitude
return adjusted_pressure
<|code_end|>
examples/utils/plot_parse_filename.py
<|code_start|>"""
Parse the ARM datastream filename
---------------------------------
This is an example of how to parse
the datastream filename into its constituent parts.
"""
from act.utils.data_utils import DatastreamParserARM
# Here we have a full path filename.
filename = '/data/sgp/sgpmetE13.b1/sgpmetE13.b1.20190501.024254.nc'
# What if we want to extract some metadata from the filename instead of reading the file
# and extracting from the global attributes. We can call the DatastreamParserARM() method
# and extract the string value from the object using its properties.
fn_obj = DatastreamParserARM(filename)
print(f"Site is {fn_obj.site}")
print(f"Datastream Class is {fn_obj.datastream_class}")
print(f"Facility is {fn_obj.facility}")
print(f"Level is {fn_obj.level}")
print(f"Datastream is {fn_obj.datastream}")
print(f"Date is {fn_obj.date}")
print(f"Time is {fn_obj.time}")
print(f"File extension is {fn_obj.ext}")
# We can also use the parser for just the datastream part to extract the parts.
# The other methods will not have a value and return None.
filename = 'sgpmetE13.b1'
fn_obj = DatastreamParserARM(filename)
print(f"\nSite is {fn_obj.site}")
print(f"Datastream Class is {fn_obj.datastream_class}")
print(f"Facility is {fn_obj.facility}")
print(f"Level is {fn_obj.level}")
print(f"Datastream is {fn_obj.datastream}")
print(f"Date is {fn_obj.date}")
print(f"Time is {fn_obj.time}")
print(f"File extension is {fn_obj.ext}")
<|code_end|>
|
New Example Template
### Description
For new contributors who might want to contribute an example, it would be helpful to have a template they could use to get started. Recommend that we add a "Template" section to the Examples area and include templates for examples, or even for initial functions including documentation; a sketch of a documented function template is included below.
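A hedged sketch of what the function half of such a template could look like. It follows the NumPy docstring style already used across ACT; the function name and behavior are placeholders, not an existing ACT API.

```python
"""
Title of the example or function template
------------------------------------------
Short description of what the contribution demonstrates. Replace this
text when starting a new example or function.
"""
import numpy as np


def template_function(data, scale=1.0):
    """
    One-line summary of the function (placeholder).

    Parameters
    ----------
    data : array-like
        Description of the input data.
    scale : float
        Description of the keyword argument.

    Returns
    -------
    result : numpy.ndarray
        Description of the returned value.
    """
    # Placeholder body; replace with the real calculation.
    result = np.asarray(data) * scale
    return result
```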
|
examples/templates/example_template.py
<|code_start|><|code_end|>
|
examples/templates/example_template.py
<|code_start|># Place python module imports here, example:
import os
import matplotlib.pyplot as plt
import act
# Place ARM username and token here, or fall back to an example file if the
# username and token aren't set, example:
username = os.getenv('ARM_USERNAME')
token = os.getenv('ARM_PASSWORD')
# Download and read file or files with the IO and discovery functions
# within ACT, example:
results = act.discovery.download_data(
username, token, 'sgpceilC1.b1', '2022-01-14', '2022-01-19')
ceil_ds = act.io.armfiles.read_netcdf(results)
# Plot file using the ACT display submodule, example:
display = act.plotting.TimeSeriesDisplay(ceil_ds)
display.plot('backscatter')
plt.show()
<|code_end|>
|
Sunset Stamen maps in GeoDisplay and potentially replace
Stamen is transitioning its map tiles to Stadia Maps at the end of October 2023. ACT will need to deprecate that feature in GeoDisplay and potentially look for replacements. A sketch of one possible replacement pattern is included below the linked PR.
https://github.com/SciTools/cartopy/pull/2266
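A rough sketch of one replacement pattern, using another tiler class from cartopy.io.img_tiles; GoogleTiles is shown purely as an illustration, and the Stadia Maps tiler added in the linked PR would be wired in the same way.

```python
import matplotlib.pyplot as plt
from cartopy.io import img_tiles

# Any img_tiles class with the standard tiler interface can stand in for Stamen.
tiler = img_tiles.GoogleTiles(style='satellite')

# Build an axes in the tiler's native projection and draw the background.
ax = plt.axes(projection=tiler.crs)
ax.set_extent([-100, -95, 35, 38])  # lon_min, lon_max, lat_min, lat_max
ax.add_image(tiler, 8)  # zoom level, comparable to the old 'tile' keyword
plt.show()
```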
|
act/plotting/geodisplay.py
<|code_start|>"""
Stores the class for GeographicPlotDisplay.
"""
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from .plot import Display
try:
import cartopy.crs as ccrs
import cartopy.feature as cfeature
from cartopy.io.img_tiles import Stamen
CARTOPY_AVAILABLE = True
except ImportError:
CARTOPY_AVAILABLE = False
class GeographicPlotDisplay(Display):
"""
A class for making a geographic tracer plot of aircraft, ship or other
moving platform data.
This is inherited from the :func:`act.plotting.Display`
class and therefore has the same attributes as that class.
See :func:`act.plotting.Display`
for more information. There are no additional attributes or parameters
to this class.
In order to create geographic plots, ACT needs the Cartopy package to be
installed on your system. More information about
Cartopy can be found here: https://scitools.org.uk/cartopy/docs/latest/ .
"""
def __init__(self, ds, ds_name=None, **kwargs):
if not CARTOPY_AVAILABLE:
raise ImportError(
'Cartopy needs to be installed on your ' 'system to make geographic display plots.'
)
super().__init__(ds, ds_name, **kwargs)
if self.fig is None:
self.fig = plt.figure(**kwargs)
def geoplot(
self,
data_field=None,
lat_field='lat',
lon_field='lon',
dsname=None,
cbar_label=None,
title=None,
projection=None,
plot_buffer=0.08,
stamen='terrain-background',
tile=8,
cartopy_feature=None,
cmap='rainbow',
text=None,
gridlines=True,
**kwargs,
):
"""
Creates a latitude and longitude plot of a time series data set with
data values indicated by color and described with a colorbar.
Latitude values must be in degree north (-90 to 90) and
longitude must be in degree east (-180 to 180).
Parameters
----------
data_field : str
Name of data field in the dataset to plot.
lat_field : str
Name of latitude field in the dataset to use.
lon_field : str
Name of longitude field in the dataset to use.
dsname : str or None
The name of the datastream to plot. Set to None to make ACT
attempt to automatically determine this.
cbar_label : str
Label to use with colorbar. If set to None will attempt
to create label from long_name and units.
title : str
Plot title.
projection : cartopy.crs object
Projection to use on plot. See
https://scitools.org.uk/cartopy/docs/latest/reference/projections.html?highlight=projections
plot_buffer : float
Buffer to add around data on plot in lat and lon dimension.
stamen : str
Dataset to use for background image. Set to None to not use
background image.
tile : int
Tile zoom to use with background image. Higher number indicates
more resolution. A value of 8 is typical for a normal sonde plot.
cartopy_feature : list of str or str
Cartopy feature to add to plot.
cmap : str
Color map to use for colorbar.
text : dictionary
Dictionary of {text:[lon,lat]} to add to plot. Can have more
than one set of text to add.
gridlines : boolean
Use latitude and longitude gridlines.
**kwargs : keyword arguments
Any other keyword arguments that will be passed
into :func:`matplotlib.pyplot.scatter` when the figure
is made. See the matplotlib documentation for further details
on what keyword arguments are available.
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the GeographicPlotDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
if data_field is None:
raise ValueError('You must enter the name of the data ' 'to be plotted.')
if projection is None:
if CARTOPY_AVAILABLE:
projection = ccrs.PlateCarree()
# Extract data from the dataset
try:
lat = self._ds[dsname][lat_field].values
except KeyError:
raise ValueError(
(
'You will need to provide the name of the '
"field if not '{}' to use for latitude "
'data.'
).format(lat_field)
)
try:
lon = self._ds[dsname][lon_field].values
except KeyError:
raise ValueError(
(
'You will need to provide the name of the '
"field if not '{}' to use for longitude "
'data.'
).format(lon_field)
)
# Set up metadata information for display on plot
if cbar_label is None:
try:
cbar_label = (
self._ds[dsname][data_field].attrs['long_name']
+ ' ('
+ self._ds[dsname][data_field].attrs['units']
+ ')'
)
except KeyError:
cbar_label = data_field
lat_limits = [np.nanmin(lat), np.nanmax(lat)]
lon_limits = [np.nanmin(lon), np.nanmax(lon)]
box_size = np.max([np.abs(np.diff(lat_limits)), np.abs(np.diff(lon_limits))])
bx_buf = box_size * plot_buffer
lat_center = np.sum(lat_limits) / 2.0
lon_center = np.sum(lon_limits) / 2.0
lat_limits = [
lat_center - box_size / 2.0 - bx_buf,
lat_center + box_size / 2.0 + bx_buf,
]
lon_limits = [
lon_center - box_size / 2.0 - bx_buf,
lon_center + box_size / 2.0 + bx_buf,
]
data = self._ds[dsname][data_field].values
# Create base plot projection
ax = plt.axes(projection=projection)
plt.subplots_adjust(left=0.01, right=0.99, bottom=0.05, top=0.93)
ax.set_extent([lon_limits[0], lon_limits[1], lat_limits[0], lat_limits[1]], crs=projection)
if title is None:
try:
dim = list(self._ds[dsname][data_field].dims)
ts = pd.to_datetime(str(self._ds[dsname][dim[0]].values[0]))
date = ts.strftime('%Y-%m-%d')
time_str = ts.strftime('%H:%M:%S')
plt.title(' '.join([dsname, 'at', date, time_str]))
except NameError:
plt.title(dsname)
else:
plt.title(title)
if stamen:
tiler = Stamen(stamen)
ax.add_image(tiler, tile)
colorbar_map = None
if cmap is not None:
colorbar_map = matplotlib.colormaps.get_cmap(cmap)
sc = ax.scatter(lon, lat, c=data, cmap=colorbar_map, **kwargs)
cbar = plt.colorbar(sc)
cbar.ax.set_ylabel(cbar_label)
if cartopy_feature is not None:
if isinstance(cartopy_feature, str):
cartopy_feature = [cartopy_feature]
cartopy_feature = [ii.upper() for ii in cartopy_feature]
if 'STATES' in cartopy_feature:
ax.add_feature(cfeature.STATES.with_scale('10m'))
if 'LAND' in cartopy_feature:
ax.add_feature(cfeature.LAND)
if 'OCEAN' in cartopy_feature:
ax.add_feature(cfeature.OCEAN)
if 'COASTLINE' in cartopy_feature:
ax.add_feature(cfeature.COASTLINE)
if 'BORDERS' in cartopy_feature:
ax.add_feature(cfeature.BORDERS, linestyle=':')
if 'LAKES' in cartopy_feature:
ax.add_feature(cfeature.LAKES, alpha=0.5)
if 'RIVERS' in cartopy_feature:
ax.add_feature(cfeature.RIVERS)
if text is not None:
for label, location in text.items():
ax.plot(location[0], location[1], marker='*', color='black')
ax.text(location[0], location[1], label, color='black')
if gridlines:
if projection == ccrs.PlateCarree() or projection == ccrs.Mercator:
gl = ax.gridlines(
crs=projection,
draw_labels=True,
linewidth=1,
color='gray',
alpha=0.5,
linestyle='--',
)
gl.top_labels = False
gl.left_labels = True
gl.bottom_labels = True
gl.right_labels = False
gl.xlabel_style = {'size': 6, 'color': 'gray'}
gl.ylabel_style = {'size': 6, 'color': 'gray'}
else:
# Labels are only currently supported for PlateCarree and Mercator
gl = ax.gridlines(
draw_labels=False,
linewidth=1,
color='gray',
alpha=0.5,
linestyle='--',
)
return ax
<|code_end|>
examples/plotting/plot_aaf_track.py
<|code_start|>"""
Plot ARM AAF Flight Path
--------------------------------
Plot the ARM AAF flight path using the GeographicPlotDisplay
Author: Joe O'Brien
"""
import matplotlib.pyplot as plt
import act
from act.io.icartt import read_icartt
# Call the read_icartt function, which supports input
# for ICARTT (v2.0) formatted files.
# Example file is ARM Aerial Facility Navigation Data
ds = read_icartt(act.tests.EXAMPLE_AAF_ICARTT)
# Use GeographicPlotDisplay for referencing.
# NOTE: Cartopy is needed!
display = act.plotting.GeographicPlotDisplay(ds, figsize=(12, 10))
# Plot the ARM AAF flight track with respect to Pressure Altitude
display.geoplot('press_alt', lat_field='lat', lon_field='lon', stamen=None)
# Display the plot
plt.show()
<|code_end|>
|
act/plotting/geodisplay.py
<|code_start|>"""
Stores the class for GeographicPlotDisplay.
"""
import warnings
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from .plot import Display
try:
import cartopy.crs as ccrs
import cartopy.feature as cfeature
from cartopy.io import img_tiles
CARTOPY_AVAILABLE = True
except ImportError:
CARTOPY_AVAILABLE = False
class GeographicPlotDisplay(Display):
"""
A class for making a geographic tracer plot of aircraft, ship or other
moving platform data.
This is inherited from the :func:`act.plotting.Display`
class and therefore has the same attributes as that class.
See :func:`act.plotting.Display`
for more information. There are no additional attributes or parameters
to this class.
In order to create geographic plots, ACT needs the Cartopy package to be
installed on your system. More information about
Cartopy can be found here: https://scitools.org.uk/cartopy/docs/latest/ .
"""
def __init__(self, ds, ds_name=None, **kwargs):
if not CARTOPY_AVAILABLE:
raise ImportError(
'Cartopy needs to be installed on your ' 'system to make geographic display plots.'
)
super().__init__(ds, ds_name, **kwargs)
if self.fig is None:
self.fig = plt.figure(**kwargs)
def geoplot(
self,
data_field=None,
lat_field='lat',
lon_field='lon',
dsname=None,
cbar_label=None,
title=None,
projection=None,
plot_buffer=0.08,
img_tile=None,
img_tile_args={},
tile=8,
stamen='terrain-background',
cartopy_feature=None,
cmap='rainbow',
text=None,
gridlines=True,
**kwargs,
):
"""
Creates a latitude and longitude plot of a time series data set with
data values indicated by color and described with a colorbar.
Latitude values must be in degree north (-90 to 90) and
longitude must be in degree east (-180 to 180).
Parameters
----------
data_field : str
Name of data field in the dataset to plot.
lat_field : str
Name of latitude field in the dataset to use.
lon_field : str
Name of longitude field in the dataset to use.
dsname : str or None
The name of the datastream to plot. Set to None to make ACT
attempt to automatically determine this.
cbar_label : str
Label to use with colorbar. If set to None will attempt
to create label from long_name and units.
title : str
Plot title.
projection : cartopy.crs object
Projection to use on plot. See
https://scitools.org.uk/cartopy/docs/latest/reference/projections.html?highlight=projections
plot_buffer : float
Buffer to add around data on plot in lat and lon dimension.
img_tile : str
Image to use for the plot background. Set to None to not use
background image. For all image background types, see:
https://scitools.org.uk/cartopy/docs/v0.16/cartopy/io/img_tiles.html
Default is None.
img_tile_args : dict
Keyword arguments for the chosen img_tile. These arguments can be
found for the corresponding img_tile here:
https://scitools.org.uk/cartopy/docs/v0.16/cartopy/io/img_tiles.html
Default is an empty dictionary.
tile : int
Tile zoom to use with background image. Higher number indicates
more resolution. A value of 8 is typical for a normal sonde plot.
cartopy_feature : list of str or str
Cartopy feature to add to plot.
cmap : str
Color map to use for colorbar.
text : dictionary
Dictionary of {text:[lon,lat]} to add to plot. Can have more
than one set of text to add.
gridlines : boolean
Use latitude and longitude gridlines.
**kwargs : keyword arguments
Any other keyword arguments that will be passed
into :func:`matplotlib.pyplot.scatter` when the figure
is made. See the matplotlib documentation for further details
on what keyword arguments are available.
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the GeographicPlotDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
if data_field is None:
raise ValueError('You must enter the name of the data ' 'to be plotted.')
if projection is None:
if CARTOPY_AVAILABLE:
projection = ccrs.PlateCarree()
# Extract data from the dataset
try:
lat = self._ds[dsname][lat_field].values
except KeyError:
raise ValueError(
(
'You will need to provide the name of the '
"field if not '{}' to use for latitude "
'data.'
).format(lat_field)
)
try:
lon = self._ds[dsname][lon_field].values
except KeyError:
raise ValueError(
(
'You will need to provide the name of the '
"field if not '{}' to use for longitude "
'data.'
).format(lon_field)
)
# Set up metadata information for display on plot
if cbar_label is None:
try:
cbar_label = (
self._ds[dsname][data_field].attrs['long_name']
+ ' ('
+ self._ds[dsname][data_field].attrs['units']
+ ')'
)
except KeyError:
cbar_label = data_field
lat_limits = [np.nanmin(lat), np.nanmax(lat)]
lon_limits = [np.nanmin(lon), np.nanmax(lon)]
box_size = np.max([np.abs(np.diff(lat_limits)), np.abs(np.diff(lon_limits))])
bx_buf = box_size * plot_buffer
lat_center = np.sum(lat_limits) / 2.0
lon_center = np.sum(lon_limits) / 2.0
lat_limits = [
lat_center - box_size / 2.0 - bx_buf,
lat_center + box_size / 2.0 + bx_buf,
]
lon_limits = [
lon_center - box_size / 2.0 - bx_buf,
lon_center + box_size / 2.0 + bx_buf,
]
data = self._ds[dsname][data_field].values
# Create base plot projection
ax = plt.axes(projection=projection)
plt.subplots_adjust(left=0.01, right=0.99, bottom=0.05, top=0.93)
ax.set_extent([lon_limits[0], lon_limits[1], lat_limits[0], lat_limits[1]], crs=projection)
if title is None:
try:
dim = list(self._ds[dsname][data_field].dims)
ts = pd.to_datetime(str(self._ds[dsname][dim[0]].values[0]))
date = ts.strftime('%Y-%m-%d')
time_str = ts.strftime('%H:%M:%S')
plt.title(' '.join([dsname, 'at', date, time_str]))
except NameError:
plt.title(dsname)
else:
plt.title(title)
if stamen and img_tile is None:
tiler = img_tiles.Stamen(stamen)
ax.add_image(tiler, tile)
warnings.warn(
"Stamen is deprecated in Cartopy and in future versions of ACT, "
"please use img_tile to specify the image background. ")
else:
if img_tile is not None:
tiler = getattr(img_tiles, img_tile)(**img_tile_args)
ax.add_image(tiler, tile)
colorbar_map = None
if cmap is not None:
colorbar_map = matplotlib.colormaps.get_cmap(cmap)
sc = ax.scatter(lon, lat, c=data, cmap=colorbar_map, **kwargs)
cbar = plt.colorbar(sc)
cbar.ax.set_ylabel(cbar_label)
if cartopy_feature is not None:
if isinstance(cartopy_feature, str):
cartopy_feature = [cartopy_feature]
cartopy_feature = [ii.upper() for ii in cartopy_feature]
if 'STATES' in cartopy_feature:
ax.add_feature(cfeature.STATES.with_scale('10m'))
if 'LAND' in cartopy_feature:
ax.add_feature(cfeature.LAND)
if 'OCEAN' in cartopy_feature:
ax.add_feature(cfeature.OCEAN)
if 'COASTLINE' in cartopy_feature:
ax.add_feature(cfeature.COASTLINE)
if 'BORDERS' in cartopy_feature:
ax.add_feature(cfeature.BORDERS, linestyle=':')
if 'LAKES' in cartopy_feature:
ax.add_feature(cfeature.LAKES, alpha=0.5)
if 'RIVERS' in cartopy_feature:
ax.add_feature(cfeature.RIVERS)
if text is not None:
for label, location in text.items():
ax.plot(location[0], location[1], marker='*', color='black')
ax.text(location[0], location[1], label, color='black')
if gridlines:
if projection == ccrs.PlateCarree() or projection == ccrs.Mercator:
gl = ax.gridlines(
crs=projection,
draw_labels=True,
linewidth=1,
color='gray',
alpha=0.5,
linestyle='--',
)
gl.top_labels = False
gl.left_labels = True
gl.bottom_labels = True
gl.right_labels = False
gl.xlabel_style = {'size': 6, 'color': 'gray'}
gl.ylabel_style = {'size': 6, 'color': 'gray'}
else:
# Labels are only currently supported for PlateCarree and Mercator
gl = ax.gridlines(
draw_labels=False,
linewidth=1,
color='gray',
alpha=0.5,
linestyle='--',
)
return ax
<|code_end|>
examples/plotting/plot_aaf_track.py
<|code_start|>"""
Plot ARM AAF Flight Path
--------------------------------
Plot the ARM AAF flight path using the GeographicPlotDisplay
Author: Joe O'Brien
"""
import matplotlib.pyplot as plt
import act
from act.io.icartt import read_icartt
# Call the read_icartt function, which supports input
# for ICARTT (v2.0) formatted files.
# Example file is ARM Aerial Facility Navigation Data
ds = read_icartt(act.tests.EXAMPLE_AAF_ICARTT)
# Use GeographicPlotDisplay for referencing.
# NOTE: Cartopy is needed!
display = act.plotting.GeographicPlotDisplay(ds, figsize=(12, 10))
# Plot the ARM AAF flight track with respect to Pressure Altitude
display.geoplot('press_alt', lat_field='lat', lon_field='lon')
# Display the plot
plt.show()
<|code_end|>
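As a follow-up usage note, a hedged sketch of exercising the new keywords from the updated geoplot above, reusing the ds read in this example; GoogleTiles and its style argument are only an illustrative tiler choice.

```python
# Select the background tiler by name via img_tile instead of the
# deprecated stamen keyword; img_tile_args is passed to the tiler class.
display = act.plotting.GeographicPlotDisplay(ds, figsize=(12, 10))
display.geoplot('press_alt', lat_field='lat', lon_field='lon',
                img_tile='GoogleTiles', img_tile_args={'style': 'satellite'})
plt.show()
```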
|
Change default behavior of datafilter
Currently the default behavior of act.qc.qcfilter.datafilter is to delete the QC variable after applying QC, but that could remove valuable information a user may want while exploring the data. Recommend that we change that default to False so the QC variable is retained (see the sketch below).
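A hedged sketch of the non-destructive call; the rm_assessments and del_qc_var keyword names are assumed from the current datafilter signature, and del_qc_var=False is the behavior this issue recommends making the default.

```python
import act

# Read an ACT example file and standardize its QC variables.
ds = act.io.arm.read_arm_netcdf(act.tests.EXAMPLE_MET1, cleanup_qc=True)

# Set failing samples to NaN but keep qc_temp_mean for later inspection.
ds.qcfilter.datafilter('temp_mean', rm_assessments=['Bad', 'Indeterminate'],
                       del_qc_var=False)
```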
|
act/qc/qcfilter.py
<|code_start|>"""
Functions and methods for creating ancillary quality control variables
and filters (masks) which can be used with various corrections
routines in ACT.
"""
import dask
import numpy as np
import xarray as xr
from act.qc import comparison_tests, qctests, bsrn_tests
@xr.register_dataset_accessor('qcfilter')
class QCFilter(qctests.QCTests, comparison_tests.QCTests, bsrn_tests.QCTests):
"""
A class for building quality control variables containing arrays for
filtering data based on a set of test condition typically based on the
values in the data fields. These filters can be used in various
algorithms and calculations within ACT.
"""
def __init__(self, ds):
"""initialize"""
self._ds = ds
def check_for_ancillary_qc(
self,
var_name,
add_if_missing=True,
cleanup=False,
flag_type=False
):
"""
Method to check if a quality control variable exists in the dataset
and return the quality control variable name.
Will call create_qc_variable() to make the variable if it does not exist
and update_ancillary_variable() to ensure linkage between data and
quality control variable. Can also be used just to get the
corresponding quality control variable name, adding it if
it is missing.
Parameters
----------
var_name : str
Data variable name.
add_if_missing : boolean
Add quality control variable if missing from the dataset. Will raise
an exception if the var_name does not exist in the Dataset. Set to False
to not raise exception.
cleanup : boolean
Option to run qc.clean.cleanup() method on the dataset
to ensure the dataset was updated from ARM QC to the
correct standardized QC.
flag_type : boolean
Indicating the QC variable uses flag_values instead of
flag_masks.
Returns
-------
qc_var_name : str or None
Name of existing or new quality control variable. Returns
None if no existing quality control variable is found and
add_if_missing is set to False.
Examples
--------
.. code-block:: python
from act.tests import EXAMPLE_METE40
from act.io.arm import read_arm_netcdf
ds = read_arm_netcdf(EXAMPLE_METE40, cleanup_qc=True)
qc_var_name = ds.qcfilter.check_for_ancillary_qc('atmos_pressure')
print(f'qc_var_name: {qc_var_name}')
qc_var_name = ds.qcfilter.check_for_ancillary_qc('the_greatest_variable_ever',
add_if_missing=False)
print(f'qc_var_name: {qc_var_name}')
"""
qc_var_name = None
try:
ancillary_variables = self._ds[var_name].attrs['ancillary_variables']
if isinstance(ancillary_variables, str):
ancillary_variables = ancillary_variables.split()
for var in ancillary_variables:
for attr, value in self._ds[var].attrs.items():
if attr == 'standard_name' and 'quality_flag' in value:
qc_var_name = var
if add_if_missing and qc_var_name is None:
qc_var_name = self._ds.qcfilter.create_qc_variable(var_name, flag_type=flag_type)
except KeyError:
# Since no ancillary_variables exist look for ARM style of QC
# variable name. If it exists use it else create new
# QC variable.
if add_if_missing:
try:
self._ds['qc_' + var_name]
qc_var_name = 'qc_' + var_name
except KeyError:
qc_var_name = self._ds.qcfilter.create_qc_variable(
var_name, flag_type=flag_type
)
# Make sure data variable has a variable attribute linking
# data variable to QC variable.
if add_if_missing:
self._ds.qcfilter.update_ancillary_variable(var_name, qc_var_name)
# Clean up quality control variables to the required standard in the
# xarray dataset.
if cleanup:
self._ds.clean.cleanup(handle_missing_value=True, link_qc_variables=False)
return qc_var_name
def create_qc_variable(
self, var_name,
flag_type=False,
flag_values_set_value=0,
qc_var_name=None
):
"""
Method to create a quality control variable in the dataset.
Will try not to destroy the qc variable by appending numbers
to the variable name if needed.
Parameters
----------
var_name : str
Data variable name.
flag_type : boolean
If an integer flag type should be created instead of
bitpacked mask type. Will create flag_values instead of
flag_masks.
flag_values_set_value : int
Initial flag value to use when initializing array.
qc_var_name : str
Optional new quality control variable name. If not set
will create one using \\"qc\\_\\" prepended to the data
variable name. If the name given or created is taken
will append a number that does not have a conflict.
Returns
-------
qc_var_name : str
Name of new quality control variable created.
Examples
--------
.. code-block:: python
from act.tests import EXAMPLE_AOSMET
from act.io.arm import read_arm_netcdf
ds = read_arm_netcdf(EXAMPLE_AOSMET)
qc_var_name = ds.qcfilter.create_qc_variable('temperature_ambient')
print(qc_var_name)
print(ds[qc_var_name])
"""
# Make QC variable long name. The variable long_name attribute
# may not exist so catch that error and set to default.
try:
qc_variable_long_name = (
'Quality check results on field: ' + self._ds[var_name].attrs['long_name']
)
except KeyError:
qc_variable_long_name = 'Quality check results for ' + var_name
# Make a new quality control variable name. Check if exists in the
# dataset. If so loop through creation of new name until one is
# found that will not replace existing variable.
if qc_var_name is None:
qc_var_name = 'qc_' + var_name
variable_names = list(self._ds.data_vars)
if qc_var_name in variable_names:
for ii in range(1, 100):
temp_qc_var_name = '_'.join([qc_var_name, str(ii)])
if temp_qc_var_name not in variable_names:
qc_var_name = temp_qc_var_name
break
# Create the QC variable filled with 0 values matching the
# shape of data variable.
try:
qc_data = dask.array.from_array(
np.zeros_like(self._ds[var_name].values, dtype=np.int32),
chunks=self._ds[var_name].data.chunksize,
)
except AttributeError:
qc_data = np.zeros_like(self._ds[var_name].values, dtype=np.int32)
# Updating to use coords instead of dim, which caused a loss of
# attributes as noted in Issue 347
self._ds[qc_var_name] = xr.DataArray(
data=qc_data,
coords=self._ds[var_name].coords,
attrs={'long_name': qc_variable_long_name, 'units': '1'},
)
# Update if using flag_values and don't want 0 to be default value.
if flag_type and flag_values_set_value != 0:
self._ds[qc_var_name].values = self._ds[qc_var_name].values + int(
flag_values_set_value
)
# Add required variable attributes.
if flag_type:
self._ds[qc_var_name].attrs['flag_values'] = []
else:
self._ds[qc_var_name].attrs['flag_masks'] = []
self._ds[qc_var_name].attrs['flag_meanings'] = []
self._ds[qc_var_name].attrs['flag_assessments'] = []
self._ds[qc_var_name].attrs['standard_name'] = 'quality_flag'
self.update_ancillary_variable(var_name, qc_var_name=qc_var_name)
return qc_var_name
def update_ancillary_variable(self, var_name, qc_var_name=None):
"""
Method to check if ancillary_variables variable attribute
is set with quality control variable name.
Parameters
----------
var_name : str
Data variable name.
qc_var_name : str
quality control variable name. If not given will attempt
to get the name from data variable ancillary_variables
attribute.
Examples
--------
.. code-block:: python
from act.tests import EXAMPLE_AOSMET
from act.io.arm import read_arm_netcdf
ds = read_arm_netcdf(EXAMPLE_AOSMET)
var_name = 'temperature_ambient'
qc_var_name = ds.qcfilter.create_qc_variable(var_name)
del ds[var_name].attrs['ancillary_variables']
ds.qcfilter.update_ancillary_variable(var_name, qc_var_name)
print(ds[var_name].attrs['ancillary_variables'])
"""
if qc_var_name is None:
qc_var_name = self._ds.qcfilter.check_for_ancillary_qc(var_name, add_if_missing=False)
if qc_var_name is None:
return
try:
ancillary_variables = self._ds[var_name].attrs['ancillary_variables']
if qc_var_name not in ancillary_variables:
ancillary_variables = ' '.join([ancillary_variables, qc_var_name])
except KeyError:
ancillary_variables = qc_var_name
self._ds[var_name].attrs['ancillary_variables'] = ancillary_variables
def add_test(
self,
var_name,
index=None,
test_number=None,
test_meaning=None,
test_assessment='Bad',
flag_value=False,
recycle=False,
):
"""
Method to add a new test/filter to a quality control variable.
Parameters
----------
var_name : str
data variable name
index : int, bool, list of int or bool, numpy array, tuple of numpy arrays, None
Indexes into quality control array to set the test bit.
If not set or set to None will not set the test on any
element of the quality control variable but will still
add the test to the flag_masks, flag_meanings and
flag_assessments attributes.
test_number : int
Test number to use. If keyword is not set will use first
available test bit/test number.
test_meaning : str
String describing the test. Will be added to flag_meanings
variable attribute.
test_assessment : str
String describing the test assessment. If not set will use
"Bad" as the string to append to flag_assessments. Will
update to be capitalized.
flag_value : boolean
Switch to use flag_values integer quality control.
recycle : boolean
Option to use number less than next highest test if available. For example
tests 1, 2, 4, 5 are set. Set to true the next test chosen will be 3, else
will be 6.
Returns
-------
test_dict : dict
A dictionary containing information added to the QC
variable.
Examples
--------
.. code-block:: python
result = ds.qcfilter.add_test(var_name, test_meaning='Birds!')
"""
test_dict = {}
if test_meaning is None:
raise ValueError(
'You need to provide a value for test_meaning '
'keyword when calling the add_test method'
)
# This ensures the indexing will work even if given float values.
# Preserves tuples from np.where() or boolean arrays for standard
# python indexing.
if index is not None and not isinstance(index, (np.ndarray, tuple)):
index = np.array(index)
if index.dtype.kind not in np.typecodes['AllInteger']:
index = index.astype(int)
# Ensure assessment is capitalized to be consistent
test_assessment = test_assessment.capitalize()
qc_var_name = self._ds.qcfilter.check_for_ancillary_qc(var_name, flag_type=flag_value)
if test_number is None:
test_number = self._ds.qcfilter.available_bit(qc_var_name, recycle=recycle)
self._ds.qcfilter.set_test(var_name, index, test_number, flag_value)
if flag_value:
try:
self._ds[qc_var_name].attrs['flag_values'].append(test_number)
except KeyError:
self._ds[qc_var_name].attrs['flag_values'] = [test_number]
else:
# Determine if flag_masks test number is too large for current data type.
# If so up convert data type.
flag_masks = np.array(self._ds[qc_var_name].attrs['flag_masks'])
mask_dtype = flag_masks.dtype
if not np.issubdtype(mask_dtype, np.integer):
mask_dtype = np.uint32
if np.iinfo(mask_dtype).max - set_bit(0, test_number) <= -1:
if mask_dtype == np.int8 or mask_dtype == np.uint8:
mask_dtype = np.uint16
elif mask_dtype == np.int16 or mask_dtype == np.uint16:
mask_dtype = np.uint32
elif mask_dtype == np.int32 or mask_dtype == np.uint32:
mask_dtype = np.uint64
flag_masks = flag_masks.astype(mask_dtype)
flag_masks = np.append(flag_masks, np.array(set_bit(0, test_number), dtype=mask_dtype))
self._ds[qc_var_name].attrs['flag_masks'] = list(flag_masks)
try:
self._ds[qc_var_name].attrs['flag_meanings'].append(test_meaning)
except KeyError:
self._ds[qc_var_name].attrs['flag_meanings'] = [test_meaning]
try:
self._ds[qc_var_name].attrs['flag_assessments'].append(test_assessment)
except KeyError:
self._ds[qc_var_name].attrs['flag_assessments'] = [test_assessment]
test_dict['test_number'] = test_number
test_dict['test_meaning'] = test_meaning
test_dict['test_assessment'] = test_assessment
test_dict['qc_variable_name'] = qc_var_name
test_dict['variable_name'] = var_name
return test_dict
def remove_test(
self,
var_name=None,
test_number=None,
qc_var_name=None,
flag_value=False,
flag_values_reset_value=0,
):
"""
Method to remove a test/filter from a quality control variable. Must set
var_name or qc_var_name.
Parameters
----------
var_name : str or None
Data variable name.
test_number : int
Test number to remove.
qc_var_name : str or None
Quality control variable name. Ignored if var_name is set.
flag_value : boolean
Switch to use flag_values integer quality control.
flag_values_reset_value : int
Value to use when resetting a flag_values value to not be set.
Examples
--------
.. code-block:: python
ds.qcfilter.remove_test(var_name, test_number=3)
"""
if test_number is None:
raise ValueError(
'You need to provide a value for test_number '
'keyword when calling the remove_test() method'
)
if var_name is None and qc_var_name is None:
raise ValueError(
'You need to provide a value for var_name or qc_var_name '
'keyword when calling the remove_test() method'
)
if var_name is not None:
qc_var_name = self._ds.qcfilter.check_for_ancillary_qc(var_name)
# Determine which index is using the test number
index = None
if flag_value:
flag_values = self._ds[qc_var_name].attrs['flag_values']
for ii, flag_num in enumerate(flag_values):
if flag_num == test_number:
index = ii
break
else:
flag_masks = self._ds[qc_var_name].attrs['flag_masks']
for ii, bit_num in enumerate(flag_masks):
if parse_bit(bit_num)[0] == test_number:
index = ii
break
# If can't find the index of test return before doing anything.
if index is None:
return
if flag_value:
remove_index = self._ds.qcfilter.get_qc_test_mask(
var_name=var_name,
qc_var_name=qc_var_name,
test_number=test_number,
return_index=True,
flag_value=True,
)
self._ds.qcfilter.unset_test(
var_name=var_name,
qc_var_name=qc_var_name,
index=remove_index,
test_number=test_number,
flag_value=flag_value,
flag_values_reset_value=flag_values_reset_value,
)
del flag_values[index]
self._ds[qc_var_name].attrs['flag_values'] = flag_values
else:
remove_index = self._ds.qcfilter.get_qc_test_mask(
var_name=var_name,
qc_var_name=qc_var_name,
test_number=test_number,
return_index=True,
)
self._ds.qcfilter.unset_test(
var_name=var_name,
qc_var_name=qc_var_name,
index=remove_index,
test_number=test_number,
flag_value=flag_value,
)
if isinstance(flag_masks, list):
del flag_masks[index]
else:
flag_masks = np.delete(flag_masks, index)
self._ds[qc_var_name].attrs['flag_masks'] = flag_masks
flag_meanings = self._ds[qc_var_name].attrs['flag_meanings']
del flag_meanings[index]
self._ds[qc_var_name].attrs['flag_meanings'] = flag_meanings
flag_assessments = self._ds[qc_var_name].attrs['flag_assessments']
del flag_assessments[index]
self._ds[qc_var_name].attrs['flag_assessments'] = flag_assessments
def set_test(self, var_name, index=None, test_number=None, flag_value=False):
"""
Method to set a test/filter in a quality control variable.
Parameters
----------
var_name : str
Data variable name.
index : int or list or numpy array
Index to set test in quality control array. If want to
unset all values will need to pass in index of all values.
test_number : int
Test number to set.
flag_value : boolean
Switch to use flag_values integer quality control.
Examples
--------
.. code-block:: python
index = [0, 1, 2, 30]
ds.qcfilter.set_test(var_name, index=index, test_number=2)
"""
qc_var_name = self._ds.qcfilter.check_for_ancillary_qc(var_name)
qc_variable = np.array(self._ds[qc_var_name].values)
# Ensure the qc_variable data type is integer. This ensures bitwise comparison
# will not cause an error.
if qc_variable.dtype.kind not in np.typecodes['AllInteger']:
qc_variable = qc_variable.astype(int)
# Determine if test number is too large for current data type. If so
# up convert data type.
dtype = qc_variable.dtype
if np.iinfo(dtype).max - set_bit(0, test_number) < -1:
if dtype == np.int8:
dtype = np.int16
elif dtype == np.int16:
dtype = np.int32
elif dtype == np.int32:
dtype = np.int64
qc_variable = qc_variable.astype(dtype)
if index is not None:
if flag_value:
qc_variable[index] = test_number
else:
if bool(np.shape(index)):
qc_variable[index] = set_bit(qc_variable[index], test_number)
elif index == 0:
qc_variable = set_bit(qc_variable, test_number)
self._ds[qc_var_name].values = qc_variable
def unset_test(
self,
var_name=None,
qc_var_name=None,
index=None,
test_number=None,
flag_value=False,
flag_values_reset_value=0,
):
"""
Method to unset a test/filter from a quality control variable.
Parameters
----------
var_name : str or None
Data variable name.
qc_var_name : str or None
Quality control variable name. Ignored if var_name is set.
index : int or list or numpy array
Index to unset test in quality control array. If want to
unset all values will need to pass in index of all values.
test_number : int
Test number to remove.
flag_value : boolean
Switch to use flag_values integer quality control.
flag_values_reset_value : int
Value to use when resetting a flag_values value to not be set.
Examples
--------
.. code-block:: python
ds.qcfilter.unset_test(var_name, index=range(10, 100), test_number=2)
"""
if index is None:
return
if var_name is None and qc_var_name is None:
raise ValueError(
'You need to provide a value for var_name or qc_var_name '
'keyword when calling the unset_test() method'
)
if var_name is not None:
qc_var_name = self._ds.qcfilter.check_for_ancillary_qc(var_name)
# Get QC variable
qc_variable = self._ds[qc_var_name].values
# Ensure the qc_variable data type is integer. This ensures bitwise comparison
# will not cause an error.
if qc_variable.dtype.kind not in np.typecodes['AllInteger']:
qc_variable = qc_variable.astype(int)
if flag_value:
qc_variable[index] = flag_values_reset_value
else:
qc_variable[index] = unset_bit(qc_variable[index], test_number)
self._ds[qc_var_name].values = qc_variable
def available_bit(self, qc_var_name, recycle=False):
"""
Method to determine next available bit or flag to use with a QC test.
Will check for flag_masks first and if not found will check for
flag_values. This will drive how the next value is chosen.
Parameters
----------
qc_var_name : str
Quality control variable name.
recycle : boolean
Option to look for a bit (test) not in use starting from 1.
If a test is not defined will return the lowest number, else
will just use next highest number.
Returns
-------
test_num : int
Next available test number.
Examples
--------
.. code-block:: python
from act.tests import EXAMPLE_METE40
from act.io.arm import read_arm_netcdf
ds = read_arm_netcdf(EXAMPLE_METE40, cleanup_qc=True)
test_number = ds.qcfilter.available_bit('qc_atmos_pressure')
print(test_number)
"""
try:
flag_masks = self._ds[qc_var_name].attrs['flag_masks']
flag_value = False
except KeyError:
try:
flag_masks = self._ds[qc_var_name].attrs['flag_values']
flag_value = True
except KeyError:
try:
self._ds[qc_var_name].attrs['flag_values']
flag_masks = self._ds[qc_var_name].attrs['flag_masks']
flag_value = False
except KeyError:
raise ValueError(
'Problem getting next value from '
'available_bit(). flag_values and '
'flag_masks not set as expected'
)
if flag_masks == []:
next_bit = 1
else:
if flag_value:
if recycle:
next_bit = min(set(range(1, 100000)) - set(flag_masks))
else:
next_bit = max(flag_masks) + 1
else:
if recycle:
tests = [parse_bit(mask)[0] for mask in flag_masks]
next_bit = min(set(range(1, 63)) - set(tests))
else:
next_bit = parse_bit(max(flag_masks))[0] + 1
return int(next_bit)
def get_qc_test_mask(
self,
var_name=None,
test_number=None,
qc_var_name=None,
flag_value=False,
return_index=False,
):
"""
Returns a numpy array of False or True where a particular
flag or bit is set in a numpy array. Must set var_name or qc_var_name
when calling.
Parameters
----------
var_name : str or None
Data variable name.
test_number : int
Test number to return array where test is set.
qc_var_name : str or None
Quality control variable name. Ignored if var_name is set.
flag_value : boolean
Switch to use flag_values integer quality control.
return_index : boolean
Return a numpy array of index numbers into QC array where the
test is set instead of False or True mask.
Returns
-------
test_mask : numpy bool array or numpy integer array
A numpy boolean array with False or True where the test number or
bit was set, or numpy integer array of indexes where test is True.
Examples
--------
.. code-block:: python
from act.io.arm import read_arm_netcdf
from act.tests import EXAMPLE_IRT25m20s
ds = read_arm_netcdf(EXAMPLE_IRT25m20s)
var_name = "inst_up_long_dome_resist"
result = ds.qcfilter.add_test(
var_name, index=[0, 1, 2], test_meaning="Birds!"
)
qc_var_name = result["qc_variable_name"]
mask = ds.qcfilter.get_qc_test_mask(
var_name, result["test_number"], return_index=True
)
print(mask)
array([0, 1, 2])
mask = ds.qcfilter.get_qc_test_mask(var_name, result["test_number"])
print(mask)
array([True, True, True, ..., False, False, False])
data = ds[var_name].values
print(data[mask])
array([7.84, 7.8777, 7.8965], dtype=float32)
import numpy as np
data[mask] = np.nan
print(data)
array([nan, nan, nan, ..., 7.6705, 7.6892, 7.6892], dtype=float32)
"""
if var_name is None and qc_var_name is None:
raise ValueError(
'You need to provide a value for var_name or qc_var_name '
'keyword when calling the get_qc_test_mask() method'
)
if test_number is None:
raise ValueError(
'You need to provide a value for test_number '
'keyword when calling the get_qc_test_mask() method'
)
if var_name is not None:
qc_var_name = self._ds.qcfilter.check_for_ancillary_qc(var_name)
qc_variable = self._ds[qc_var_name].values
# Ensure the qc_variable data type is integer. This ensures bitwise comparison
# will not cause an error.
if qc_variable.dtype.kind not in np.typecodes['AllInteger']:
qc_variable = qc_variable.astype(int)
if flag_value:
tripped = qc_variable == test_number
else:
check_bit = set_bit(0, test_number) & qc_variable
tripped = check_bit > 0
test_mask = np.full(qc_variable.shape, False, dtype='bool')
# Make sure test_mask is an array. If qc_variable is scalar will
# be returned from np.zeros as scalar.
test_mask = np.atleast_1d(test_mask)
test_mask[tripped] = True
test_mask = np.ma.make_mask(test_mask, shrink=False)
if return_index:
test_mask = np.where(test_mask)[0]
return test_mask
def get_masked_data(
self,
var_name,
rm_assessments=None,
rm_tests=None,
return_nan_array=False,
ma_fill_value=None,
return_inverse=False,
):
"""
Returns a numpy masked array containing data and mask or
a numpy float array with masked values set to NaN.
Parameters
----------
var_name : str
Data variable name.
rm_assessments : str or list of str
Assessment name to exclude from returned data.
rm_tests : int or list of int
Test numbers to exclude from returned data. This is the test
number (or bit position number) not the mask number.
return_nan_array : boolean
Return a numpy array with filtered ( or masked) values
set to numpy NaN value. If the data is type int will upconvert
to numpy float to allow setting NaN value.
ma_fill_value : int or float (or str?)
The numpy masked array fill_value used in creation of the
masked array. If the datatype needs to be upconverted to allow
the fill value to be used, data will be upconverted.
return_inverse : boolean
Invert the masked array mask or return data array where mask is set
to False instead of True set to NaN. Useful for overplotting
where the data is failing.
Returns
-------
variable : numpy masked array or numpy float array
Default is to return a numpy masked array with the mask set to
True where the test with requested assessment or test number
was found set.
If return_nan_array is True will return numpy array upconverted
to float with locations where the test with requested assessment
or test number was found set converted to NaN.
Examples
--------
.. code-block:: python
from act.io.arm import read_arm_netcdf
from act.tests import EXAMPLE_IRT25m20s
ds = read_arm_netcdf(EXAMPLE_IRT25m20s)
var_name = "inst_up_long_dome_resist"
result = ds.qcfilter.add_test(
var_name, index=[0, 1, 2], test_meaning="Birds!"
)
data = ds.qcfilter.get_masked_data(
var_name, rm_assessments=["Bad", "Indeterminate"]
)
print(data)
masked_array(
data=[..., 7.670499801635742, 7.689199924468994, 7.689199924468994],
mask=[..., False, False, False],
fill_value=1e20,
dtype=float32,
)
"""
qc_var_name = self._ds.qcfilter.check_for_ancillary_qc(var_name, add_if_missing=False)
flag_value = False
flag_values = None
flag_masks = None
flag_assessments = None
try:
flag_assessments = self._ds[qc_var_name].attrs['flag_assessments']
flag_masks = self._ds[qc_var_name].attrs['flag_masks']
except KeyError:
pass
try:
flag_values = self._ds[qc_var_name].attrs['flag_values']
flag_value = True
except KeyError:
pass
test_numbers = []
if rm_tests is not None:
if isinstance(rm_tests, (int, float, str)):
rm_tests = [int(rm_tests)]
test_numbers.extend(rm_tests)
if rm_assessments is not None:
if isinstance(rm_assessments, str):
rm_assessments = [rm_assessments]
if flag_masks is not None:
test_nums = [parse_bit(mask)[0] for mask in flag_masks]
if flag_values is not None:
test_nums = flag_values
rm_assessments = [x.lower() for x in rm_assessments]
if flag_assessments is not None:
for ii, assessment in enumerate(flag_assessments):
if assessment.lower() in rm_assessments:
test_numbers.append(test_nums[ii])
# Make the list of test numbers to mask unique
test_numbers = list(set(test_numbers))
# Create mask of indexes by looking where each test is set
variable = self._ds[var_name].values
nan_dtype = np.float32
if variable.dtype in (np.float64, np.int64):
nan_dtype = np.float64
mask = np.zeros(variable.shape, dtype=bool)
for test in test_numbers:
mask = mask | self._ds.qcfilter.get_qc_test_mask(var_name, test, flag_value=flag_value)
# Convert data numpy array into masked array
try:
variable = np.ma.array(variable, mask=mask, fill_value=ma_fill_value)
except TypeError:
variable = np.ma.array(
variable,
mask=mask,
fill_value=ma_fill_value,
dtype=np.array(ma_fill_value).dtype,
)
# If requested switch array from where data is not failing tests
# to where data is failing tests. This can be used when overplotting
# where the data is failing the tests.
if return_inverse:
mask = variable.mask
mask = np.invert(mask)
variable.mask = mask
# If asked to return numpy array with values set to NaN
if return_nan_array:
variable = variable.astype(nan_dtype)
variable = variable.filled(fill_value=np.nan)
return variable
def datafilter(
self,
variables=None,
rm_assessments=None,
rm_tests=None,
verbose=False,
del_qc_var=True,
):
"""
Method to apply quality control variables to data variables by
changing the data values in the dataset using quality control variables.
The data is updated with failing data set to
NaN. This can be used to update the data variable in the xarray
dataset for use with xarray methods to perform analysis on the data
since those methods don't read the quality control variables.
Parameters
----------
variables : None or str or list of str
Data variable names to process. If set to None will update all
data variables.
rm_assessments : str or list of str
Assessment names listed under quality control variable flag_assessments
to exclude from returned data. Examples include
['Bad', 'Incorrect', 'Indeterminate', 'Suspect']
rm_tests : int or list of int
Test numbers listed under quality control variable to exclude from
returned data. This is the test
number (or bit position number) not the mask number.
verbose : boolean
Print processing information.
del_qc_var : boolean
Option to delete quality control variable after processing. Since
the data values can not be determined after they are set to NaN
and xarray method processing would also process the quality control
variables, the default is to remove the quality control data
variables.
Examples
--------
.. code-block:: python
from act.io.arm import read_arm_netcdf
from act.tests import EXAMPLE_MET1
ds = read_arm_netcdf(EXAMPLE_MET1)
ds.clean.cleanup()
var_name = "atmos_pressure"
ds_1 = ds.mean()
ds.qcfilter.add_less_test(var_name, 99, test_assessment="Bad")
ds.qcfilter.datafilter(rm_assessments="Bad")
ds_2 = ds.mean()
print("All_data =", ds_1[var_name].values)
All_data = 98.86098
print("Bad_Removed =", ds_2[var_name].values)
Bad_Removed = 99.15148
"""
if rm_assessments is None and rm_tests is None:
raise ValueError('Need to set rm_assessments or rm_tests option')
if variables is not None and isinstance(variables, str):
variables = [variables]
if variables is None:
variables = list(self._ds.data_vars)
for var_name in variables:
qc_var_name = self.check_for_ancillary_qc(var_name, add_if_missing=False, cleanup=False)
if qc_var_name is None:
if verbose:
if var_name in ['base_time', 'time_offset']:
continue
try:
if self._ds[var_name].attrs['standard_name'] == 'quality_flag':
continue
except KeyError:
pass
print(f'No quality control variable for {var_name} found '
f'in call to .qcfilter.datafilter()')
continue
# Need to return data as Numpy array with NaN values. Setting the Dask array
# to Numpy masked array does not work with other tools.
data = self.get_masked_data(
var_name,
rm_assessments=rm_assessments,
rm_tests=rm_tests,
return_nan_array=True
)
# If data was originally stored as Dask array return values to Dataset as Dask array
# else set as Numpy array.
try:
self._ds[var_name].data = dask.array.from_array(
data, chunks=self._ds[var_name].data.chunksize)
except AttributeError:
self._ds[var_name].values = data
# Adding information on filtering to history attribute
flag_masks = None
flag_assessments = None
flag_meanings = None
try:
flag_assessments = list(self._ds[qc_var_name].attrs['flag_assessments'])
flag_masks = list(self._ds[qc_var_name].attrs['flag_masks'])
flag_meanings = list(self._ds[qc_var_name].attrs['flag_meanings'])
except KeyError:
pass
# Add comment to history for each test that's filtered out
if isinstance(rm_tests, int):
rm_tests = [rm_tests]
if rm_tests is not None:
for test in list(rm_tests):
test = 2 ** (test - 1)
if test in flag_masks:
index = flag_masks.index(test)
comment = ''.join(['act.qc.datafilter: ', flag_meanings[index]])
if 'history' in self._ds[var_name].attrs.keys():
self._ds[var_name].attrs['history'] += '\n' + comment
else:
self._ds[var_name].attrs['history'] = comment
if isinstance(rm_assessments, str):
rm_assessments = [rm_assessments]
if rm_assessments is not None:
for assessment in rm_assessments:
if assessment in flag_assessments:
index = [i for i, e in enumerate(flag_assessments) if e == assessment]
for ind in index:
comment = ''.join(['act.qc.datafilter: ', flag_meanings[ind]])
if 'history' in self._ds[var_name].attrs.keys():
self._ds[var_name].attrs['history'] += '\n' + comment
else:
self._ds[var_name].attrs['history'] = comment
# If requested delete quality control variable
if del_qc_var:
del self._ds[qc_var_name]
if verbose:
print(f'Deleting {qc_var_name} from dataset')
def set_bit(array, bit_number):
"""
Function to set a quality control bit given a scalar or
array of values and a bit number.
Parameters
----------
array : int list of int or numpy array of int
The bitpacked array to set the bit number.
bit_number : int
The bit (or test) number to set starting at 1.
Returns
-------
array : int, numpy array, tuple, list
Integer or numpy array with bit set for each element of the array.
Returned in same type.
Examples
--------
Example use setting bit 2 to an array called data:
.. code-block:: python
from act.qc.qcfilter import set_bit
data = np.array(range(0, 7))
data = set_bit(data, 2)
print(data)
array([2, 3, 2, 3, 6, 7, 6])
"""
was_list = False
was_tuple = False
if isinstance(array, list):
array = np.array(array)
was_list = True
if isinstance(array, tuple):
array = np.array(array)
was_tuple = True
if bit_number > 0:
array |= 1 << bit_number - 1
if was_list:
array = list(array)
if was_tuple:
array = tuple(array)
return array
def unset_bit(array, bit_number):
"""
Function to remove a quality control bit given a
scalar or array of values and a bit number.
Parameters
----------
array : int list of int or numpy array
Array of integers containing bit packed numbers.
bit_number : int
Bit number to remove starting at 1.
Returns
-------
array : int or numpy array
Returns same data type as array entered with bit removed. Will
fail gracefully if the bit requested to be removed was not set.
Examples
--------
.. code-block:: python
from act.qc.qcfilter import set_bit, unset_bit
data = set_bit([0, 1, 2, 3, 4], 2)
data = set_bit(data, 3)
print(data)
[6, 7, 6, 7, 6]
data = unset_bit(data, 2)
print(data)
[4, 5, 4, 5, 4]
"""
was_list = False
was_tuple = False
if isinstance(array, list):
array = np.array(array)
was_list = True
if isinstance(array, tuple):
array = np.array(array)
was_tuple = True
if bit_number > 0:
array &= ~(1 << bit_number - 1)
if was_list:
array = list(array)
if was_tuple:
array = tuple(array)
return array
def parse_bit(qc_bit):
"""
Given a single integer value, return bit positions.
Parameters
----------
qc_bit : int or numpy int
Bit packed integer number to be parsed.
Returns
-------
bit_number : numpy.int32 array
Array containing all bit numbers of the bit packed number.
If no bits set returns empty array.
Examples
--------
.. code-block:: python
from act.qc.qcfilter import parse_bit
parse_bit(7)
array([1, 2, 3], dtype=int32)
"""
if isinstance(qc_bit, (list, tuple, np.ndarray)):
if len(qc_bit) > 1:
raise ValueError('Must be a single value.')
qc_bit = qc_bit[0]
if qc_bit < 0:
raise ValueError('Must be a positive integer.')
# Convert integer value to single element numpy array of type unsigned integer 64
value = np.array([qc_bit]).astype(">u8")
# Convert value to view containing only unsigned integer 8 data type. This
# is required for the numpy unpackbits function which only works with
# unsigned integer 8 bit data type.
value = value.view("u1")
# Unpack bits using numpy into array of 1 where bit is set and convert into boolean array
index = np.unpackbits(value).astype(bool)
# Create range of numbers from 64 to 1 and subset where unpackbits found a bit set.
bit_number = np.arange(index.size, 0, -1)[index]
# Flip the array to increasing numbers to match historical method
bit_number = np.flip(bit_number)
# bit_number = []
# qc_bit = int(qc_bit)
# counter = 0
# while qc_bit > 0:
# temp_value = qc_bit % 2
# qc_bit = qc_bit >> 1
# counter += 1
# if temp_value == 1:
# bit_number.append(counter)
# Convert data type into expected type
bit_number = np.asarray(bit_number, dtype=np.int32)
return bit_number
<|code_end|>
|
act/qc/qcfilter.py
<|code_start|>"""
Functions and methods for creating ancillary quality control variables
and filters (masks) which can be used with various corrections
routines in ACT.
"""
import dask
import numpy as np
import xarray as xr
from act.qc import comparison_tests, qctests, bsrn_tests
@xr.register_dataset_accessor('qcfilter')
class QCFilter(qctests.QCTests, comparison_tests.QCTests, bsrn_tests.QCTests):
"""
A class for building quality control variables containing arrays for
filtering data based on a set of test condition typically based on the
values in the data fields. These filters can be used in various
algorithms and calculations within ACT.
"""
def __init__(self, ds):
"""initialize"""
self._ds = ds
def check_for_ancillary_qc(
self,
var_name,
add_if_missing=True,
cleanup=False,
flag_type=False
):
"""
Method to check if a quality control variable exists in the dataset
and return the quality control variable name.
Will call create_qc_variable() to make the variable if it does not exist
and update_ancillary_variable() to ensure linkage between data and
quality control variable. Can also be used just to get the
corresponding quality control variable name, adding it if
it is missing.
Parameters
----------
var_name : str
Data variable name.
add_if_missing : boolean
Add quality control variable if missing from the dataset. Will raise
an exception if the var_name does not exist in the Dataset. Set to False
to not raise exception.
cleanup : boolean
Option to run qc.clean.cleanup() method on the dataset
to ensure the dataset was updated from ARM QC to the
correct standardized QC.
flag_type : boolean
Indicating the QC variable uses flag_values instead of
flag_masks.
Returns
-------
qc_var_name : str or None
Name of existing or new quality control variable. Returns
None if no existing quality control variable is found and
add_if_missing is set to False.
Examples
--------
.. code-block:: python
from act.tests import EXAMPLE_METE40
from act.io.armfiles import read_netcdf
ds = read_netcdf(EXAMPLE_METE40, cleanup_qc=True)
qc_var_name = ds.qcfilter.check_for_ancillary_qc('atmos_pressure')
print(f'qc_var_name: {qc_var_name}')
qc_var_name = ds.qcfilter.check_for_ancillary_qc('the_greatest_variable_ever',
add_if_missing=False)
print(f'qc_var_name: {qc_var_name}')
"""
qc_var_name = None
try:
ancillary_variables = self._ds[var_name].attrs['ancillary_variables']
if isinstance(ancillary_variables, str):
ancillary_variables = ancillary_variables.split()
for var in ancillary_variables:
for attr, value in self._ds[var].attrs.items():
if attr == 'standard_name' and 'quality_flag' in value:
qc_var_name = var
if add_if_missing and qc_var_name is None:
qc_var_name = self._ds.qcfilter.create_qc_variable(var_name, flag_type=flag_type)
except KeyError:
# Since no ancillary_variables exist look for ARM style of QC
# variable name. If it exists use it else create new
# QC variable.
if add_if_missing:
try:
self._ds['qc_' + var_name]
qc_var_name = 'qc_' + var_name
except KeyError:
qc_var_name = self._ds.qcfilter.create_qc_variable(
var_name, flag_type=flag_type
)
# Make sure data variable has a variable attribute linking
# data variable to QC variable.
if add_if_missing:
self._ds.qcfilter.update_ancillary_variable(var_name, qc_var_name)
# Clean up quality control variables to the required standard in the
# xarray dataset.
if cleanup:
self._ds.clean.cleanup(handle_missing_value=True, link_qc_variables=False)
return qc_var_name
def create_qc_variable(
self, var_name,
flag_type=False,
flag_values_set_value=0,
qc_var_name=None
):
"""
Method to create a quality control variable in the dataset.
Will try not to destroy the qc variable by appending numbers
to the variable name if needed.
Parameters
----------
var_name : str
Data variable name.
flag_type : boolean
If an integer flag type should be created instead of
bitpacked mask type. Will create flag_values instead of
flag_masks.
flag_values_set_value : int
Initial flag value to use when initializing array.
qc_var_name : str
Optional new quality control variable name. If not set
will create one using \\"qc\\_\\" prepended to the data
variable name. If the name given or created is taken
will append a number that does not have a conflict.
Returns
-------
qc_var_name : str
Name of new quality control variable created.
Examples
--------
.. code-block:: python
from act.tests import EXAMPLE_AOSMET
from act.io.armfiles import read_netcdf
ds = read_netcdf(EXAMPLE_AOSMET)
qc_var_name = ds.qcfilter.create_qc_variable('temperature_ambient')
print(qc_var_name)
print(ds[qc_var_name])
"""
# Make QC variable long name. The variable long_name attribute
# may not exist so catch that error and set to default.
try:
qc_variable_long_name = (
'Quality check results on field: ' + self._ds[var_name].attrs['long_name']
)
except KeyError:
qc_variable_long_name = 'Quality check results for ' + var_name
# Make a new quality control variable name. Check if exists in the
# dataset. If so loop through creation of new name until one is
# found that will not replace existing variable.
if qc_var_name is None:
qc_var_name = 'qc_' + var_name
variable_names = list(self._ds.data_vars)
if qc_var_name in variable_names:
for ii in range(1, 100):
temp_qc_var_name = '_'.join([qc_var_name, str(ii)])
if temp_qc_var_name not in variable_names:
qc_var_name = temp_qc_var_name
break
# Create the QC variable filled with 0 values matching the
# shape of data variable.
try:
qc_data = dask.array.from_array(
np.zeros_like(self._ds[var_name].values, dtype=np.int32),
chunks=self._ds[var_name].data.chunksize,
)
except AttributeError:
qc_data = np.zeros_like(self._ds[var_name].values, dtype=np.int32)
# Updating to use coords instead of dim, which caused a loss of
# attributes as noted in Issue 347
self._ds[qc_var_name] = xr.DataArray(
data=qc_data,
coords=self._ds[var_name].coords,
attrs={'long_name': qc_variable_long_name, 'units': '1'},
)
# Update if using flag_values and don't want 0 to be default value.
if flag_type and flag_values_set_value != 0:
self._ds[qc_var_name].values = self._ds[qc_var_name].values + int(
flag_values_set_value
)
# Add required variable attributes.
if flag_type:
self._ds[qc_var_name].attrs['flag_values'] = []
else:
self._ds[qc_var_name].attrs['flag_masks'] = []
self._ds[qc_var_name].attrs['flag_meanings'] = []
self._ds[qc_var_name].attrs['flag_assessments'] = []
self._ds[qc_var_name].attrs['standard_name'] = 'quality_flag'
self.update_ancillary_variable(var_name, qc_var_name=qc_var_name)
return qc_var_name
def update_ancillary_variable(self, var_name, qc_var_name=None):
"""
Method to check if ancillary_variables variable attribute
is set with quality control variable name.
Parameters
----------
var_name : str
Data variable name.
qc_var_name : str
quality control variable name. If not given will attempt
to get the name from data variable ancillary_variables
attribute.
Examples
--------
.. code-block:: python
from act.tests import EXAMPLE_AOSMET
from act.io.armfiles import read_netcdf
ds = read_netcdf(EXAMPLE_AOSMET)
var_name = 'temperature_ambient'
qc_var_name = ds.qcfilter.create_qc_variable(var_name)
del ds[var_name].attrs['ancillary_variables']
ds.qcfilter.update_ancillary_variable(var_name, qc_var_name)
print(ds[var_name].attrs['ancillary_variables'])
"""
if qc_var_name is None:
qc_var_name = self._ds.qcfilter.check_for_ancillary_qc(var_name, add_if_missing=False)
if qc_var_name is None:
return
try:
ancillary_variables = self._ds[var_name].attrs['ancillary_variables']
if qc_var_name not in ancillary_variables:
ancillary_variables = ' '.join([ancillary_variables, qc_var_name])
except KeyError:
ancillary_variables = qc_var_name
self._ds[var_name].attrs['ancillary_variables'] = ancillary_variables
def add_test(
self,
var_name,
index=None,
test_number=None,
test_meaning=None,
test_assessment='Bad',
flag_value=False,
recycle=False,
):
"""
Method to add a new test/filter to a quality control variable.
Parameters
----------
var_name : str
data variable name
index : int, bool, list of int or bool, numpy array, tuple of numpy arrays, None
Indexes into quality control array to set the test bit.
If not set or set to None will not set the test on any
element of the quality control variable but will still
add the test to the flag_masks, flag_meanings and
flag_assessments attributes.
test_number : int
Test number to use. If keyword is not set will use first
available test bit/test number.
test_meaning : str
String describing the test. Will be added to flag_meanings
variable attribute.
test_assessment : str
String describing the test assessment. If not set will use
"Bad" as the string to append to flag_assessments. Will
update to be capitalized.
flag_value : boolean
Switch to use flag_values integer quality control.
recycle : boolean
Option to use number less than next highest test if available. For example
tests 1, 2, 4, 5 are set. Set to true the next test chosen will be 3, else
will be 6.
Returns
-------
test_dict : dict
A dictionary containing information added to the QC
variable.
Examples
--------
.. code-block:: python
result = ds.qcfilter.add_test(var_name, test_meaning='Birds!')
"""
test_dict = {}
if test_meaning is None:
raise ValueError(
'You need to provide a value for test_meaning '
'keyword when calling the add_test method'
)
# This ensures the indexing will work even if given float values.
# Preserves tuples from np.where() or boolean arrays for standard
# python indexing.
if index is not None and not isinstance(index, (np.ndarray, tuple)):
index = np.array(index)
if index.dtype.kind not in np.typecodes['AllInteger']:
index = index.astype(int)
# Ensure assessment is capitalized to be consistent
test_assessment = test_assessment.capitalize()
qc_var_name = self._ds.qcfilter.check_for_ancillary_qc(var_name, flag_type=flag_value)
if test_number is None:
test_number = self._ds.qcfilter.available_bit(qc_var_name, recycle=recycle)
self._ds.qcfilter.set_test(var_name, index, test_number, flag_value)
if flag_value:
try:
self._ds[qc_var_name].attrs['flag_values'].append(test_number)
except KeyError:
self._ds[qc_var_name].attrs['flag_values'] = [test_number]
else:
# Determine if flag_masks test number is too large for current data type.
# If so up convert data type.
flag_masks = np.array(self._ds[qc_var_name].attrs['flag_masks'])
mask_dtype = flag_masks.dtype
if not np.issubdtype(mask_dtype, np.integer):
mask_dtype = np.uint32
if np.iinfo(mask_dtype).max - set_bit(0, test_number) <= -1:
if mask_dtype == np.int8 or mask_dtype == np.uint8:
mask_dtype = np.uint16
elif mask_dtype == np.int16 or mask_dtype == np.uint16:
mask_dtype = np.uint32
elif mask_dtype == np.int32 or mask_dtype == np.uint32:
mask_dtype = np.uint64
flag_masks = flag_masks.astype(mask_dtype)
flag_masks = np.append(flag_masks, np.array(set_bit(0, test_number), dtype=mask_dtype))
self._ds[qc_var_name].attrs['flag_masks'] = list(flag_masks)
try:
self._ds[qc_var_name].attrs['flag_meanings'].append(test_meaning)
except KeyError:
self._ds[qc_var_name].attrs['flag_meanings'] = [test_meaning]
try:
self._ds[qc_var_name].attrs['flag_assessments'].append(test_assessment)
except KeyError:
self._ds[qc_var_name].attrs['flag_assessments'] = [test_assessment]
test_dict['test_number'] = test_number
test_dict['test_meaning'] = test_meaning
test_dict['test_assessment'] = test_assessment
test_dict['qc_variable_name'] = qc_var_name
test_dict['variable_name'] = var_name
return test_dict
def remove_test(
self,
var_name=None,
test_number=None,
qc_var_name=None,
flag_value=False,
flag_values_reset_value=0,
):
"""
Method to remove a test/filter from a quality control variable. Must set
var_name or qc_var_name.
Parameters
----------
var_name : str or None
Data variable name.
test_number : int
Test number to remove.
qc_var_name : str or None
Quality control variable name. Ignored if var_name is set.
flag_value : boolean
Switch to use flag_values integer quality control.
flag_values_reset_value : int
Value to use when resetting a flag_values value to not be set.
Examples
--------
.. code-block:: python
ds.qcfilter.remove_test(var_name, test_number=3)
"""
if test_number is None:
raise ValueError(
'You need to provide a value for test_number '
'keyword when calling the remove_test() method'
)
if var_name is None and qc_var_name is None:
raise ValueError(
'You need to provide a value for var_name or qc_var_name '
'keyword when calling the remove_test() method'
)
if var_name is not None:
qc_var_name = self._ds.qcfilter.check_for_ancillary_qc(var_name)
# Determine which index is using the test number
index = None
if flag_value:
flag_values = self._ds[qc_var_name].attrs['flag_values']
for ii, flag_num in enumerate(flag_values):
if flag_num == test_number:
index = ii
break
else:
flag_masks = self._ds[qc_var_name].attrs['flag_masks']
for ii, bit_num in enumerate(flag_masks):
if parse_bit(bit_num)[0] == test_number:
index = ii
break
# If can't find the index of test return before doing anything.
if index is None:
return
if flag_value:
remove_index = self._ds.qcfilter.get_qc_test_mask(
var_name=var_name,
qc_var_name=qc_var_name,
test_number=test_number,
return_index=True,
flag_value=True,
)
self._ds.qcfilter.unset_test(
var_name=var_name,
qc_var_name=qc_var_name,
index=remove_index,
test_number=test_number,
flag_value=flag_value,
flag_values_reset_value=flag_values_reset_value,
)
del flag_values[index]
self._ds[qc_var_name].attrs['flag_values'] = flag_values
else:
remove_index = self._ds.qcfilter.get_qc_test_mask(
var_name=var_name,
qc_var_name=qc_var_name,
test_number=test_number,
return_index=True,
)
self._ds.qcfilter.unset_test(
var_name=var_name,
qc_var_name=qc_var_name,
index=remove_index,
test_number=test_number,
flag_value=flag_value,
)
if isinstance(flag_masks, list):
del flag_masks[index]
else:
flag_masks = np.delete(flag_masks, index)
self._ds[qc_var_name].attrs['flag_masks'] = flag_masks
flag_meanings = self._ds[qc_var_name].attrs['flag_meanings']
del flag_meanings[index]
self._ds[qc_var_name].attrs['flag_meanings'] = flag_meanings
flag_assessments = self._ds[qc_var_name].attrs['flag_assessments']
del flag_assessments[index]
self._ds[qc_var_name].attrs['flag_assessments'] = flag_assessments
def set_test(self, var_name, index=None, test_number=None, flag_value=False):
"""
Method to set a test/filter in a quality control variable.
Parameters
----------
var_name : str
Data variable name.
index : int or list or numpy array
Index to set test in quality control array. If want to
unset all values will need to pass in index of all values.
test_number : int
Test number to set.
flag_value : boolean
Switch to use flag_values integer quality control.
Examples
--------
.. code-block:: python
index = [0, 1, 2, 30]
ds.qcfilter.set_test(var_name, index=index, test_number=2)
"""
qc_var_name = self._ds.qcfilter.check_for_ancillary_qc(var_name)
qc_variable = np.array(self._ds[qc_var_name].values)
# Ensure the qc_variable data type is integer. This ensures bitwise comparison
# will not cause an error.
if qc_variable.dtype.kind not in np.typecodes['AllInteger']:
qc_variable = qc_variable.astype(int)
# Determine if test number is too large for current data type. If so
# up convert data type.
dtype = qc_variable.dtype
if np.iinfo(dtype).max - set_bit(0, test_number) < -1:
if dtype == np.int8:
dtype = np.int16
elif dtype == np.int16:
dtype = np.int32
elif dtype == np.int32:
dtype = np.int64
qc_variable = qc_variable.astype(dtype)
if index is not None:
if flag_value:
qc_variable[index] = test_number
else:
if bool(np.shape(index)):
qc_variable[index] = set_bit(qc_variable[index], test_number)
elif index == 0:
qc_variable = set_bit(qc_variable, test_number)
self._ds[qc_var_name].values = qc_variable
def unset_test(
self,
var_name=None,
qc_var_name=None,
index=None,
test_number=None,
flag_value=False,
flag_values_reset_value=0,
):
"""
Method to unset a test/filter from a quality control variable.
Parameters
----------
var_name : str or None
Data variable name.
qc_var_name : str or None
Quality control variable name. Ignored if var_name is set.
index : int or list or numpy array
Index to unset test in quality control array. If want to
unset all values will need to pass in index of all values.
test_number : int
Test number to remove.
flag_value : boolean
Switch to use flag_values integer quality control.
flag_values_reset_value : int
Value to use when resetting a flag_values value to not be set.
Examples
--------
.. code-block:: python
ds.qcfilter.unset_test(var_name, index=range(10, 100), test_number=2)
"""
if index is None:
return
if var_name is None and qc_var_name is None:
raise ValueError(
'You need to provide a value for var_name or qc_var_name '
'keyword when calling the unset_test() method'
)
if var_name is not None:
qc_var_name = self._ds.qcfilter.check_for_ancillary_qc(var_name)
# Get QC variable
qc_variable = self._ds[qc_var_name].values
# Ensure the qc_variable data type is integer. This ensures bitwise comparison
# will not cause an error.
if qc_variable.dtype.kind not in np.typecodes['AllInteger']:
qc_variable = qc_variable.astype(int)
if flag_value:
qc_variable[index] = flag_values_reset_value
else:
qc_variable[index] = unset_bit(qc_variable[index], test_number)
self._ds[qc_var_name].values = qc_variable
def available_bit(self, qc_var_name, recycle=False):
"""
Method to determine next available bit or flag to use with a QC test.
Will check for flag_masks first and if not found will check for
flag_values. This will drive how the next value is chosen.
Parameters
----------
qc_var_name : str
Quality control variable name.
recycle : boolean
Option to look for a bit (test) not in use starting from 1.
If a test is not defined will return the lowest number, else
will just use next highest number.
Returns
-------
test_num : int
Next available test number.
Examples
--------
.. code-block:: python
from act.tests import EXAMPLE_METE40
from act.io.armfiles import read_netcdf
ds = read_netcdf(EXAMPLE_METE40, cleanup_qc=True)
test_number = ds.qcfilter.available_bit('qc_atmos_pressure')
print(test_number)
"""
try:
flag_masks = self._ds[qc_var_name].attrs['flag_masks']
flag_value = False
except KeyError:
try:
flag_masks = self._ds[qc_var_name].attrs['flag_values']
flag_value = True
except KeyError:
try:
self._ds[qc_var_name].attrs['flag_values']
flag_masks = self._ds[qc_var_name].attrs['flag_masks']
flag_value = False
except KeyError:
raise ValueError(
'Problem getting next value from '
'available_bit(). flag_values and '
'flag_masks not set as expected'
)
if flag_masks == []:
next_bit = 1
else:
if flag_value:
if recycle:
next_bit = min(set(range(1, 100000)) - set(flag_masks))
else:
next_bit = max(flag_masks) + 1
else:
if recycle:
tests = [parse_bit(mask)[0] for mask in flag_masks]
next_bit = min(set(range(1, 63)) - set(tests))
else:
next_bit = parse_bit(max(flag_masks))[0] + 1
return int(next_bit)
def get_qc_test_mask(
self,
var_name=None,
test_number=None,
qc_var_name=None,
flag_value=False,
return_index=False,
):
"""
Returns a numpy array of False or True where a particular
flag or bit is set in a numpy array. Must set var_name or qc_var_name
when calling.
Parameters
----------
var_name : str or None
Data variable name.
test_number : int
Test number to return array where test is set.
qc_var_name : str or None
Quality control variable name. Ignored if var_name is set.
flag_value : boolean
Switch to use flag_values integer quality control.
return_index : boolean
Return a numpy array of index numbers into QC array where the
test is set instead of False or True mask.
Returns
-------
test_mask : numpy bool array or numpy integer array
A numpy boolean array with False or True where the test number or
bit was set, or numpy integer array of indexes where test is True.
Examples
--------
.. code-block:: python
from act.io.armfiles import read_netcdf
from act.tests import EXAMPLE_IRT25m20s
ds = read_netcdf(EXAMPLE_IRT25m20s)
var_name = "inst_up_long_dome_resist"
result = ds.qcfilter.add_test(
var_name, index=[0, 1, 2], test_meaning="Birds!"
)
qc_var_name = result["qc_variable_name"]
mask = ds.qcfilter.get_qc_test_mask(
var_name, result["test_number"], return_index=True
)
print(mask)
array([0, 1, 2])
mask = ds.qcfilter.get_qc_test_mask(var_name, result["test_number"])
print(mask)
array([True, True, True, ..., False, False, False])
data = ds[var_name].values
print(data[mask])
array([7.84, 7.8777, 7.8965], dtype=float32)
import numpy as np
data[mask] = np.nan
print(data)
array([nan, nan, nan, ..., 7.6705, 7.6892, 7.6892], dtype=float32)
"""
if var_name is None and qc_var_name is None:
raise ValueError(
'You need to provide a value for var_name or qc_var_name '
'keyword when calling the get_qc_test_mask() method'
)
if test_number is None:
raise ValueError(
'You need to provide a value for test_number '
'keyword when calling the get_qc_test_mask() method'
)
if var_name is not None:
qc_var_name = self._ds.qcfilter.check_for_ancillary_qc(var_name)
qc_variable = self._ds[qc_var_name].values
# Ensure the qc_variable data type is integer. This ensures bitwise comparison
# will not cause an error.
if qc_variable.dtype.kind not in np.typecodes['AllInteger']:
qc_variable = qc_variable.astype(int)
if flag_value:
tripped = qc_variable == test_number
else:
check_bit = set_bit(0, test_number) & qc_variable
tripped = check_bit > 0
test_mask = np.full(qc_variable.shape, False, dtype='bool')
# Make sure test_mask is an array. If qc_variable is scalar will
# be returned from np.zeros as scalar.
test_mask = np.atleast_1d(test_mask)
test_mask[tripped] = True
test_mask = np.ma.make_mask(test_mask, shrink=False)
if return_index:
test_mask = np.where(test_mask)[0]
return test_mask
def get_masked_data(
self,
var_name,
rm_assessments=None,
rm_tests=None,
return_nan_array=False,
ma_fill_value=None,
return_inverse=False,
):
"""
Returns a numpy masked array containing data and mask or
a numpy float array with masked values set to NaN.
Parameters
----------
var_name : str
Data variable name.
rm_assessments : str or list of str
Assessment name to exclude from returned data.
rm_tests : int or list of int
Test numbers to exclude from returned data. This is the test
number (or bit position number) not the mask number.
return_nan_array : boolean
Return a numpy array with filtered ( or masked) values
set to numpy NaN value. If the data is type int will upconvert
to numpy float to allow setting NaN value.
ma_fill_value : int or float (or str?)
The numpy masked array fill_value used in creation of the
masked array. If the datatype needs to be upconverted to allow
the fill value to be used, data will be upconverted.
return_inverse : boolean
Invert the masked array mask or return data array where mask is set
to False instead of True set to NaN. Useful for overplotting
where the data is failing.
Returns
-------
variable : numpy masked array or numpy float array
Default is to return a numpy masked array with the mask set to
True where the test with requested assessment or test number
was found set.
If return_nan_array is True will return numpy array upconverted
to float with locations where the test with requested assessment
or test number was found set converted to NaN.
Examples
--------
.. code-block:: python
from act.io.armfiles import read_netcdf
from act.tests import EXAMPLE_IRT25m20s
ds = read_netcdf(EXAMPLE_IRT25m20s)
var_name = "inst_up_long_dome_resist"
result = ds.qcfilter.add_test(
var_name, index=[0, 1, 2], test_meaning="Birds!"
)
data = ds.qcfilter.get_masked_data(
var_name, rm_assessments=["Bad", "Indeterminate"]
)
print(data)
masked_array(
data=[..., 7.670499801635742, 7.689199924468994, 7.689199924468994],
mask=[..., False, False, False],
fill_value=1e20,
dtype=float32,
)
"""
qc_var_name = self._ds.qcfilter.check_for_ancillary_qc(var_name, add_if_missing=False)
flag_value = False
flag_values = None
flag_masks = None
flag_assessments = None
try:
flag_assessments = self._ds[qc_var_name].attrs['flag_assessments']
flag_masks = self._ds[qc_var_name].attrs['flag_masks']
except KeyError:
pass
try:
flag_values = self._ds[qc_var_name].attrs['flag_values']
flag_value = True
except KeyError:
pass
test_numbers = []
if rm_tests is not None:
if isinstance(rm_tests, (int, float, str)):
rm_tests = [int(rm_tests)]
test_numbers.extend(rm_tests)
if rm_assessments is not None:
if isinstance(rm_assessments, str):
rm_assessments = [rm_assessments]
if flag_masks is not None:
test_nums = [parse_bit(mask)[0] for mask in flag_masks]
if flag_values is not None:
test_nums = flag_values
rm_assessments = [x.lower() for x in rm_assessments]
if flag_assessments is not None:
for ii, assessment in enumerate(flag_assessments):
if assessment.lower() in rm_assessments:
test_numbers.append(test_nums[ii])
# Make the list of test numbers to mask unique
test_numbers = list(set(test_numbers))
# Create mask of indexes by looking where each test is set
variable = self._ds[var_name].values
nan_dtype = np.float32
if variable.dtype in (np.float64, np.int64):
nan_dtype = np.float64
mask = np.zeros(variable.shape, dtype=bool)
for test in test_numbers:
mask = mask | self._ds.qcfilter.get_qc_test_mask(var_name, test, flag_value=flag_value)
# Convert data numpy array into masked array
try:
variable = np.ma.array(variable, mask=mask, fill_value=ma_fill_value)
except TypeError:
variable = np.ma.array(
variable,
mask=mask,
fill_value=ma_fill_value,
dtype=np.array(ma_fill_value).dtype,
)
# If requested, switch the array from exposing the data that is passing the tests
# to exposing the data that is failing the tests. This can be used when overplotting
# the data that is failing the tests.
if return_inverse:
mask = variable.mask
mask = np.invert(mask)
variable.mask = mask
# If asked to return numpy array with values set to NaN
if return_nan_array:
variable = variable.astype(nan_dtype)
variable = variable.filled(fill_value=np.nan)
return variable
def datafilter(
self,
variables=None,
rm_assessments=None,
rm_tests=None,
verbose=False,
del_qc_var=False,
):
"""
Method to apply quality control variables to data variables by
changing the data values in the dataset using quality control variables.
The data is updated with failing data set to
NaN. This can be used to update the data variable in the xarray
dataset for use with xarray methods to perform analysis on the data
since those methods don't read the quality control variables.
Parameters
----------
variables : None or str or list of str
Data variable names to process. If set to None will update all
data variables.
rm_assessments : str or list of str
Assessment names listed under the quality control variable flag_assessments
to exclude from returned data. Examples include
['Bad', 'Incorrect', 'Indeterminate', 'Suspect']
rm_tests : int or list of int
Test numbers listed under quality control variable to exclude from
returned data. This is the test
number (or bit position number) not the mask number.
verbose : boolean
Print processing information.
del_qc_var : boolean
Option to delete the quality control variable after processing. Since
the original data values cannot be recovered after they are set to NaN,
and xarray methods would otherwise also process the quality control
variables, removing them can be useful. Defaults to False.
Examples
--------
.. code-block:: python
from act.io.armfiles import read_netcdf
from act.tests import EXAMPLE_MET1
ds = read_netcdf(EXAMPLE_MET1)
ds.clean.cleanup()
var_name = "atmos_pressure"
ds_1 = ds.nanmean()
ds.qcfilter.add_less_test(var_name, 99, test_assessment="Bad")
ds.qcfilter.datafilter(rm_assessments="Bad")
ds_2 = ds.nanmean()
print("All_data =", ds_1[var_name].values)
All_data = 98.86098
print("Bad_Removed =", ds_2[var_name].values)
Bad_Removed = 99.15148
"""
if rm_assessments is None and rm_tests is None:
raise ValueError('Need to set rm_assessments or rm_tests option')
if variables is not None and isinstance(variables, str):
variables = [variables]
if variables is None:
variables = list(self._ds.data_vars)
for var_name in variables:
qc_var_name = self.check_for_ancillary_qc(var_name, add_if_missing=False, cleanup=False)
if qc_var_name is None:
if verbose:
if var_name in ['base_time', 'time_offset']:
continue
try:
if self._ds[var_name].attrs['standard_name'] == 'quality_flag':
continue
except KeyError:
pass
print(f'No quality control variable for {var_name} found '
f'in call to .qcfilter.datafilter()')
continue
# Need to return data as Numpy array with NaN values. Setting the Dask array
# to Numpy masked array does not work with other tools.
data = self.get_masked_data(
var_name,
rm_assessments=rm_assessments,
rm_tests=rm_tests,
return_nan_array=True
)
# If data was originally stored as Dask array return values to Dataset as Dask array
# else set as Numpy array.
try:
self._ds[var_name].data = dask.array.from_array(
data, chunks=self._ds[var_name].data.chunksize)
except AttributeError:
self._ds[var_name].values = data
# Adding information on filtering to history attribute
flag_masks = None
flag_assessments = None
flag_meanings = None
try:
flag_assessments = list(self._ds[qc_var_name].attrs['flag_assessments'])
flag_masks = list(self._ds[qc_var_name].attrs['flag_masks'])
flag_meanings = list(self._ds[qc_var_name].attrs['flag_meanings'])
except KeyError:
pass
# Add comment to history for each test that's filtered out
if isinstance(rm_tests, int):
rm_tests = [rm_tests]
if rm_tests is not None:
for test in list(rm_tests):
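# Convert the 1-based test number to its bit-packed mask value (2 ** (test - 1)) before matching against flag_masks.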
test = 2 ** (test - 1)
if test in flag_masks:
index = flag_masks.index(test)
comment = ''.join(['act.qc.datafilter: ', flag_meanings[index]])
if 'history' in self._ds[var_name].attrs.keys():
self._ds[var_name].attrs['history'] += '\n' + comment
else:
self._ds[var_name].attrs['history'] = comment
if isinstance(rm_assessments, str):
rm_assessments = [rm_assessments]
if rm_assessments is not None:
for assessment in rm_assessments:
if assessment in flag_assessments:
index = [i for i, e in enumerate(flag_assessments) if e == assessment]
for ind in index:
comment = ''.join(['act.qc.datafilter: ', flag_meanings[ind]])
if 'history' in self._ds[var_name].attrs.keys():
self._ds[var_name].attrs['history'] += '\n' + comment
else:
self._ds[var_name].attrs['history'] = comment
# If requested delete quality control variable
if del_qc_var:
del self._ds[qc_var_name]
if verbose:
print(f'Deleting {qc_var_name} from dataset')
def set_bit(array, bit_number):
"""
Function to set a quality control bit given a scalar or
array of values and a bit number.
Parameters
----------
array : int, list of int, or numpy array of int
The bit-packed value(s) in which to set the bit.
bit_number : int
The bit (or test) number to set starting at 1.
Returns
-------
array : int, numpy array, tuple, list
Integer or numpy array with bit set for each element of the array.
Returned in same type.
Examples
--------
Example use setting bit 2 to an array called data:
.. code-block:: python
from act.qc.qcfilter import set_bit
data = np.array(range(0, 7))
data = set_bit(data, 2)
print(data)
array([2, 3, 2, 3, 6, 7, 6])
"""
was_list = False
was_tuple = False
if isinstance(array, list):
array = np.array(array)
was_list = True
if isinstance(array, tuple):
array = np.array(array)
was_tuple = True
if bit_number > 0:
array |= 1 << bit_number - 1
if was_list:
array = list(array)
if was_tuple:
array = tuple(array)
return array
def unset_bit(array, bit_number):
"""
Function to remove a quality control bit given a
scalar or array of values and a bit number.
Parameters
----------
array : int, list of int, or numpy array
Array of integers containing bit packed numbers.
bit_number : int
Bit number to remove starting at 1.
Returns
-------
array : int or numpy array
Returns the same data type as the array entered, with the bit removed. If
the requested bit was not set, the value is returned unchanged.
Examples
--------
.. code-block:: python
from act.qc.qcfilter import set_bit, unset_bit
data = set_bit([0, 1, 2, 3, 4], 2)
data = set_bit(data, 3)
print(data)
[6, 7, 6, 7, 6]
data = unset_bit(data, 2)
print(data)
[4, 5, 4, 5, 4]
"""
was_list = False
was_tuple = False
if isinstance(array, list):
array = np.array(array)
was_list = True
if isinstance(array, tuple):
array = np.array(array)
was_tuple = True
if bit_number > 0:
array &= ~(1 << bit_number - 1)
if was_list:
array = list(array)
if was_tuple:
array = tuple(array)
return array
def parse_bit(qc_bit):
"""
Given a single integer value, return bit positions.
Parameters
----------
qc_bit : int or numpy int
Bit packed integer number to be parsed.
Returns
-------
bit_number : numpy.int32 array
Array containing all bit numbers of the bit packed number.
If no bits set returns empty array.
Examples
--------
.. code-block:: python
from act.qc.qcfilter import parse_bit
parse_bit(7)
array([1, 2, 3], dtype=int32)
"""
if isinstance(qc_bit, (list, tuple, np.ndarray)):
if len(qc_bit) > 1:
raise ValueError('Must be a single value.')
qc_bit = qc_bit[0]
if qc_bit < 0:
raise ValueError('Must be a positive integer.')
# Convert integer value to single element numpy array of type unsigned integer 64
value = np.array([qc_bit]).astype(">u8")
# Convert value to view containing only unsigned integer 8 data type. This
# is required for the numpy unpackbits function which only works with
# unsigned integer 8 bit data type.
value = value.view("u1")
# Unpack bits using numpy into array of 1 where bit is set and convert into boolean array
index = np.unpackbits(value).astype(bool)
# Create range of numbers from 64 to 1 and subset where unpackbits found a bit set.
bit_number = np.arange(index.size, 0, -1)[index]
# Flip the array to increasing numbers to match historical method
bit_number = np.flip(bit_number)
# bit_number = []
# qc_bit = int(qc_bit)
# counter = 0
# while qc_bit > 0:
# temp_value = qc_bit % 2
# qc_bit = qc_bit >> 1
# counter += 1
# if temp_value == 1:
# bit_number.append(counter)
# Convert data type into expected type
bit_number = np.asarray(bit_number, dtype=np.int32)
return bit_number
<|code_end|>
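Several of the docstrings above stress that keywords such as ``rm_tests`` take the test number (a 1-based bit position), not the bit-packed mask value. A minimal sketch of that relationship, using only the ``set_bit`` and ``parse_bit`` helpers defined in the listing above:

```python
from act.qc.qcfilter import parse_bit, set_bit

# Test number n corresponds to the mask value 2 ** (n - 1), so test 3 -> mask 4.
mask = set_bit(0, 3)
print(mask)             # 4
print(parse_bit(mask))  # array([3], dtype=int32)

# A sample failing tests 1 and 3 is stored bit-packed as 1 + 4 = 5.
print(parse_bit(5))     # array([1, 3], dtype=int32)
```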
|
Update ARM DQR webservice
### Description
The ARM DQR web service is changing. The current system uses https://adc.arm.gov/dqrws/ and is moving to the new service at https://dqr-web-service.svcs.arm.gov/docs
### What I Did
I have done nothing yet, but I will update the code to use the new web service (see the sketch after the template below).
```
Paste the command(s) you ran and the output.
If there was a crash, please include the traceback here.
```
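As a rough sketch of the planned change, the new service can be queried with a plain HTTP GET. This assumes the `dqr_full/{datastream}/{start_date}/{end_date}/{assessment}` path layout and JSON layout used in the updated code below; the datastream name and date range here are only placeholders.

```python
import requests

# Placeholder query values; substitute a real ARM datastream and date range.
datastream = "sgpmetE13.b1"
start_date = "20190101"
end_date = "20190107"
assessment = "incorrect,suspect"

# New DQR web service endpoint (replacing https://adc.arm.gov/dqrws/).
url = (
    "https://dqr-web-service.svcs.arm.gov/dqr_full"
    f"/{datastream}/{start_date}/{end_date}/{assessment}"
)
response = requests.get(url)
if response.status_code == 200:
    # Response is JSON keyed by datastream, then quality category, then DQR ID.
    dqrs = response.json()
    print(list(dqrs.get(datastream, {}).keys()))
```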
|
act/qc/arm.py
<|code_start|>"""
Functions specifically for working with QC/DQRs from
the Atmospheric Radiation Measurement Program (ARM).
"""
import datetime as dt
import numpy as np
import requests
from act.config import DEFAULT_DATASTREAM_NAME
def add_dqr_to_qc(
ds,
variable=None,
assessment='incorrect,suspect',
exclude=None,
include=None,
normalize_assessment=True,
cleanup_qc=True,
dqr_link=False,
skip_location_vars=False,
):
"""
Function to query the ARM DQR web service for reports and
add them as a new quality control test to the ancillary quality control
variable. If no ancillary quality control variable exists, a new
one will be created and linked to the data variable through the
ancillary_variables attribute.
See online documentation from ARM Data
Quality Office on the use of the DQR web service.
https://code.arm.gov/docs/dqrws-examples/wikis/home
Information about the DQR web service is available at
https://adc.arm.gov/dqrws/
Parameters
----------
ds : xarray.Dataset
Xarray dataset
variable : string, or list of str, or None
Variables to check DQR web service. If set to None will
attempt to update all variables.
assessment : string
assessment type to get DQRs. Current options include
'missing', 'suspect', 'incorrect' or any combination separated
by a comma.
exclude : list of strings
DQR IDs to exclude from adding into QC
include : list of strings
List of DQR IDs to include in flagging of data. Any other DQR IDs
will be ignored.
normalize_assessment : boolean
The DQR assessment term is different than the embedded QC
term. Embedded QC uses "Bad" and "Indeterminate" while
DQRs use "Incorrect" and "Suspect". Setting this will ensure
the same terms are used for both.
cleanup_qc : boolean
Call clean.cleanup() method to convert to standardized ancillary
quality control variables. Has a little bit of overhead so
if the Dataset has already been cleaned up, no need to run.
dqr_link : boolean
Prints out a link for each DQR to read the full DQR. Defaults to False
skip_location_vars : boolean
Does not apply DQRs to location variables. This can be useful in the event
the submitter has erroneously selected all variables.
Returns
-------
ds : xarray.Dataset
Xarray dataset containing new quality control variables
Examples
--------
.. code-block:: python
from act.qc.arm import add_dqr_to_qc
ds = add_dqr_to_qc(ds, variable=['temp_mean', 'atmos_pressure'])
"""
# DQR Webservice goes off datastreams, pull from the dataset
if 'datastream' in ds.attrs:
datastream = ds.attrs['datastream']
elif '_datastream' in ds.attrs:
datastream = ds.attrs['_datastream']
else:
raise ValueError('Dataset does not have datastream attribute')
if datastream == DEFAULT_DATASTREAM_NAME:
raise ValueError("'datastream' name required for DQR service set to default value "
f"{datastream}. Unable to perform DQR service query.")
# Clean up QC to conform to CF conventions
if cleanup_qc:
ds.clean.cleanup()
# In order to properly flag data, get all variables if None. Exclude QC variables.
if variable is None:
variable = list(set(ds.data_vars) - set(ds.clean.matched_qc_variables))
# Check to ensure variable is list
if not isinstance(variable, (list, tuple)):
variable = [variable]
# Loop through each variable and call web service for that variable
loc_vars = ['lat', 'lon', 'alt', 'latitude', 'longitude', 'altitude']
for var_name in variable:
if skip_location_vars:
if var_name in loc_vars:
continue
# Create URL
url = 'http://www.archive.arm.gov/dqrws/ARMDQR?datastream='
url += datastream
url += '&varname=' + var_name
url += ''.join(
[
'&searchmetric=',
assessment,
'&dqrfields=dqrid,starttime,endtime,metric,subject',
]
)
# Call web service
req = requests.get(url)
# Check status values and raise error if not successful
status = req.status_code
if status == 400:
raise ValueError('Check parameters')
if status == 500:
raise ValueError('DQR Webservice Temporarily Down')
# Get data and run through each dqr
dqrs = req.text.splitlines()
time = ds['time'].values
dqr_results = {}
for line in dqrs:
line = line.split('|')
dqr_no = line[0]
# Exclude DQRs if in list
if exclude is not None and dqr_no in exclude:
continue
# Only include if in include list
if include is not None and dqr_no not in include:
continue
starttime = np.datetime64(dt.datetime.utcfromtimestamp(int(line[1])))
endtime = np.datetime64(dt.datetime.utcfromtimestamp(int(line[2])))
ind = np.where((time >= starttime) & (time <= endtime))
if ind[0].size == 0:
continue
if 'time' not in ds[var_name].dims:
ind = np.where((ds[var_name].values == ds[var_name].values) | (np.isnan(ds[var_name].values)))
if np.size(ind) == 1:
ind = ind[0]
if dqr_no in dqr_results.keys():
dqr_results[dqr_no]['index'] = np.append(dqr_results[dqr_no]['index'], ind)
else:
dqr_results[dqr_no] = {
'index': ind,
'test_assessment': line[3],
'test_meaning': ': '.join([dqr_no, line[-1]]),
}
if dqr_link:
print_url = 'https://adc.arm.gov/ArchiveServices/DQRService?dqrid=' + str(dqr_no)
print(dqr_no, '-', line[3], ':', print_url)
for key, value in dqr_results.items():
try:
ds.qcfilter.add_test(
var_name,
index=value['index'],
test_meaning=value['test_meaning'],
test_assessment=value['test_assessment'],
)
except IndexError:
print(f"Skipping '{var_name}' DQR application because of IndexError")
if normalize_assessment:
ds.clean.normalize_assessment(variables=var_name)
return ds
<|code_end|>
|
act/qc/arm.py
<|code_start|>"""
Functions specifically for working with QC/DQRs from
the Atmospheric Radiation Measurement Program (ARM).
"""
import datetime as dt
import numpy as np
import requests
import json
from act.config import DEFAULT_DATASTREAM_NAME
def add_dqr_to_qc(
ds,
variable=None,
assessment='incorrect,suspect',
exclude=None,
include=None,
normalize_assessment=True,
cleanup_qc=True,
dqr_link=False,
skip_location_vars=False,
):
"""
Function to query the ARM DQR web service for reports and
add them as a new quality control test to the ancillary quality control
variable. If no ancillary quality control variable exists, a new
one will be created and linked to the data variable through the
ancillary_variables attribute.
See online documentation from ARM Data
Quality Office on the use of the DQR web service.
https://code.arm.gov/docs/dqrws-examples/wikis/home
Information about the DQR web service is available at
https://adc.arm.gov/dqrws/
Parameters
----------
ds : xarray.Dataset
Xarray dataset
variable : string, or list of str, or None
Variables to check DQR web service. If set to None will
attempt to update all variables.
assessment : string
assessment type to get DQRs. Current options include
'missing', 'suspect', 'incorrect' or any combination separated
by a comma.
exclude : list of strings
DQR IDs to exclude from adding into QC
include : list of strings
List of DQR IDs to include in flagging of data. Any other DQR IDs
will be ignored.
normalize_assessment : boolean
The DQR assessment term is different than the embedded QC
term. Embedded QC uses "Bad" and "Indeterminate" while
DQRs use "Incorrect" and "Suspect". Setting this will ensure
the same terms are used for both.
cleanup_qc : boolean
Call clean.cleanup() method to convert to standardized ancillary
quality control variables. Has a little bit of overhead so
if the Dataset has already been cleaned up, no need to run.
dqr_link : boolean
Prints out a link for each DQR to read the full DQR. Defaults to False
skip_location_vars : boolean
Does not apply DQRs to location variables. This can be useful in the event
the submitter has erroneously selected all variables.
Returns
-------
ds : xarray.Dataset
Xarray dataset containing new or updated quality control variables
Examples
--------
.. code-block:: python
from act.qc.arm import add_dqr_to_qc
ds = add_dqr_to_qc(ds, variable=['temp_mean', 'atmos_pressure'])
"""
# DQR Webservice goes off datastreams, pull from the dataset
if 'datastream' in ds.attrs:
datastream = ds.attrs['datastream']
elif '_datastream' in ds.attrs:
datastream = ds.attrs['_datastream']
else:
raise ValueError('Dataset does not have datastream attribute')
if datastream == DEFAULT_DATASTREAM_NAME:
raise ValueError("'datastream' name required for DQR service set to default value "
f"{datastream}. Unable to perform DQR service query.")
# Clean up QC to conform to CF conventions
if cleanup_qc:
ds.clean.cleanup()
start_date = ds['time'].values[0].astype('datetime64[s]').astype(dt.datetime).strftime('%Y%m%d')
end_date = ds['time'].values[-1].astype('datetime64[s]').astype(dt.datetime).strftime('%Y%m%d')
# Clean up assessment to ensure it is a string with no spaces.
if isinstance(assessment, (list, tuple)):
assessment = ','.join(assessment)
# Not strictly needed but should make things more robust.
assessment = assessment.replace(' ', '')
assessment = assessment.lower()
# Create URL
url = 'https://dqr-web-service.svcs.arm.gov/dqr_full'
url += f"/{datastream}"
url += f"/{start_date}/{end_date}"
url += f"/{assessment}"
# Call web service
req = requests.get(url)
# Check status values and raise error if not successful
status = req.status_code
if status == 400:
raise ValueError('Check parameters')
if status == 500:
raise ValueError('DQR Webservice Temporarily Down')
# Convert from string to dictionary
docs = json.loads(req.text)
# If no DQRs found will not have a key with datastream.
# The status will also be 404.
try:
docs = docs[datastream]
except KeyError:
return ds
dqr_results = {}
for quality_category in docs:
for dqr_number in docs[quality_category]:
if exclude is not None and dqr_number in exclude:
continue
if include is not None and dqr_number not in include:
continue
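# Accumulate the time indexes covered by every date range listed for this DQR.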
index = np.array([], dtype=np.int32)
for time_range in docs[quality_category][dqr_number]['dates']:
starttime = np.datetime64(time_range['start_date'])
endtime = np.datetime64(time_range['end_date'])
ind = np.where((ds['time'].values >= starttime) & (ds['time'].values <= endtime))
if ind[0].size > 0:
index = np.append(index, ind[0])
if index.size > 0:
dqr_results[dqr_number] = {
'index': index,
'test_assessment': quality_category.lower().capitalize(),
'test_meaning': f"{dqr_number} : {docs[quality_category][dqr_number]['description']}",
'variables': docs[quality_category][dqr_number]['variables'],
}
if dqr_link:
print(f"{dqr_number} - {quality_category.lower().capitalize()}: "
f"https://adc.arm.gov/ArchiveServices/DQRService?dqrid={dqr_number}")
# Check to ensure variable is list
if variable and not isinstance(variable, (list, tuple)):
variable = [variable]
loc_vars = ['lat', 'lon', 'alt', 'latitude', 'longitude', 'altitude']
for key, value in dqr_results.items():
for var_name in value['variables']:
# Do not process on location variables
if skip_location_vars and var_name in loc_vars:
continue
# Only process provided variable names
if variable is not None and var_name not in variable:
continue
try:
ds.qcfilter.add_test(
var_name,
index=np.unique(value['index']),
test_meaning=value['test_meaning'],
test_assessment=value['test_assessment'])
except KeyError: # Variable name not in Dataset
continue
except IndexError:
print(f"Skipping '{var_name}' DQR application because of IndexError")
continue
if normalize_assessment:
ds.clean.normalize_assessment(variables=var_name)
return ds
<|code_end|>
|
setup.cfg excluding armfiles.py
I just noticed that in setup.cfg we are excluding act.io.armfiles.py (soon to be arm.py). We need to figure out whether that exclusion is still necessary.
|
act/io/armfiles.py
<|code_start|>"""
This module contains I/O operations for loading files that were created for the
Atmospheric Radiation Measurement program supported by the Department of Energy
Office of Science.
"""
import copy
import glob
import json
import re
import urllib
import warnings
from pathlib import Path, PosixPath
from netCDF4 import Dataset
from os import PathLike
import tarfile
import tempfile
import warnings
from cftime import num2date
import numpy as np
import xarray as xr
import datetime as dt
import act
import act.utils as utils
from act.config import DEFAULT_DATASTREAM_NAME
from act.utils.io_utils import unpack_tar, unpack_gzip, cleanup_files, is_gunzip_file
def read_netcdf(
filenames,
concat_dim=None,
return_None=False,
combine='by_coords',
decode_times=True,
use_cftime=True,
use_base_time=False,
combine_attrs='override',
cleanup_qc=False,
keep_variables=None,
**kwargs,
):
"""
Returns `xarray.Dataset` with stored data and metadata from a user-defined
query of ARM-standard netCDF files from a single datastream. Has some procedures
to ensure time is correctly formatted in returned Dataset.
Parameters
----------
filenames : str, pathlib.PosixPath, list of str, list of pathlib.PosixPath
Name of file(s) to read.
concat_dim : str
Dimension to concatenate files along.
return_None : boolean
Catch IOError exception when file not found and return None.
Default is False.
combine : str
String used by xarray.open_mfdataset() to determine how to combine
data files into one Dataset. See Xarray documentation for options.
decode_times : boolean
Standard Xarray option to decode time values from int/float to python datetime values.
Appears the default is to do this anyway but need this option to allow correct usage
of use_base_time.
use_cftime : boolean
Option to use cftime library to parse the time units string and correctly
establish the time values with a units string containing timezone offset.
This is used because the Pandas units string parser does not correctly recognize
time zone offset. Code will automatically detect cftime object and convert to datetime64
in returned Dataset.
use_base_time : boolean
Option to use ARM time variables base_time and time_offset. Useful when the time variable
is not included (older files) or when the units attribute is incorrectly formatted. Will use
the values of base_time and time_offset as seconds since epoch and create datetime64 values
for time coordinate. If set will change decode_times and use_cftime to False.
combine_attrs : str
String indicating how to combine attrs of the datasets being merged
cleanup_qc : boolean
Call clean.cleanup() method to convert to standardized ancillary quality control
variables. This will not allow any keyword options, so if non-default behavior is
desired will need to call clean.cleanup() method on the dataset after reading the data.
keep_variables : str or list of str
Variable names to read from data file. Works by creating a list of variable names
to exclude from reading and passing into open_mfdataset() via drop_variables keyword.
Still allows use of drop_variables keyword for variables not listed in first file to
read.
**kwargs : keywords
Keywords to pass through to xarray.open_mfdataset().
Returns
-------
ds : xarray.Dataset (or None)
ACT Xarray dataset (or None if no data file(s) found).
Examples
--------
This example will load the example sounding data used for unit testing.
.. code-block :: python
import act
ds = act.io.armfiles.read_netcdf(act.tests.sample_files.EXAMPLE_SONDE_WILDCARD)
print(ds)
"""
message = 'act.io.armfiles.read_netcdf will be replaced in version 2.0.0 by act.io.arm.read_arm_netcdf()'
warnings.warn(message, DeprecationWarning, 2)
ds = None
filenames, cleanup_temp_directory = check_if_tar_gz_file(filenames)
file_dates = []
file_times = []
# If requested to use base_time and time_offset, set keywords to correct attribute values
# to pass into xarray open_mfdataset(). Need to turn off decode_times and use_cftime
# or else will try to convert base_time and time_offset. Depending on values of attributes
# may cause a failure.
if use_base_time:
decode_times = False
use_cftime = False
# Add function keywords to kwargs dictionary for passing into open_mfdataset.
kwargs['combine'] = combine
kwargs['concat_dim'] = concat_dim
kwargs['decode_times'] = decode_times
kwargs['use_cftime'] = use_cftime
if len(filenames) > 1 and not isinstance(filenames, str):
kwargs['combine_attrs'] = combine_attrs
# Check if keep_variables is set. If so determine correct drop_variables
if keep_variables is not None:
drop_variables = None
if 'drop_variables' in kwargs.keys():
drop_variables = kwargs['drop_variables']
kwargs['drop_variables'] = keep_variables_to_drop_variables(
filenames, keep_variables, drop_variables=drop_variables)
# Create an exception tuple to use with try statements. Doing it this way
# so we can add the FileNotFoundError if requested. Can add more error
# handling in the future.
except_tuple = (ValueError,)
if return_None:
except_tuple = except_tuple + (FileNotFoundError, OSError)
try:
# Read data file with Xarray function
ds = xr.open_mfdataset(filenames, **kwargs)
except except_tuple as exception:
# If requested return None for File not found error
if type(exception).__name__ == 'FileNotFoundError':
return None
# If requested return None for File not found error
if type(exception).__name__ == 'OSError' and exception.args[0] == 'no files to open':
return None
# Look at error message and see if could be nested error message. If so
# update combine keyword and try again. This should allow reading files
# without a time variable but base_time and time_offset variables.
if (
kwargs['combine'] != 'nested'
and type(exception).__name__ == 'ValueError'
and exception.args[0] == 'Could not find any dimension coordinates '
'to use to order the datasets for concatenation'
):
kwargs['combine'] = 'nested'
ds = xr.open_mfdataset(filenames, **kwargs)
else:
# When all else fails raise the original exception
raise exception
# If requested use base_time and time_offset to derive time. Assumes that the units
# of both are in seconds and that the value is number of seconds since epoch.
if use_base_time:
time = num2date(ds['base_time'].values + ds['time_offset'].values, ds['base_time'].attrs['units'])
time = time.astype('datetime64[ns]')
# Need to use a new Dataset creation to correctly index time for use with
# .group and .resample methods in Xarray Datasets.
temp_ds = xr.Dataset({'time': (ds['time'].dims, time, ds['time'].attrs)})
ds['time'] = temp_ds['time']
del temp_ds
for att_name in ['units', 'ancillary_variables']:
try:
del ds['time'].attrs[att_name]
except KeyError:
pass
# Xarray has issues reading a CF formatted time units string if it contains
# timezone offset without a [+|-] preceding the timezone offset.
# https://github.com/pydata/xarray/issues/3644
# To ensure the times are read in correctly need to set use_cftime=True.
# This will read in time as cftime object. But Xarray uses numpy datetime64
# natively. This will convert the cftime time values to numpy datetime64.
desired_time_precision = 'datetime64[ns]'
for var_name in ['time', 'time_offset']:
try:
if 'time' in ds.dims and type(ds[var_name].values[0]).__module__.startswith('cftime.'):
# If we just convert time to datetime64 the group, sel, and other Xarray
# methods will not work correctly because time is not indexed. Need to
# use the formation of a Dataset to correctly set the time indexing.
temp_ds = xr.Dataset(
{
var_name: (
ds[var_name].dims,
ds[var_name].values.astype(desired_time_precision),
ds[var_name].attrs,
)
}
)
ds[var_name] = temp_ds[var_name]
del temp_ds
# If time_offset is in file try to convert base_time as well
if var_name == 'time_offset':
ds['base_time'].values = ds['base_time'].values.astype(desired_time_precision)
ds['base_time'] = ds['base_time'].astype(desired_time_precision)
except KeyError:
pass
# Check if "time" variable is not in the netCDF file. If so try to use
# base_time and time_offset to make time variable. Basically a fix for incorrectly
# formatted files. May require using decode_times=False to initially read the data.
if 'time' in ds.dims and not np.issubdtype(ds['time'].dtype, np.datetime64):
try:
ds['time'] = ds['time_offset']
except (KeyError, ValueError):
pass
# Adding support for wildcards
if isinstance(filenames, str):
filenames = glob.glob(filenames)
elif isinstance(filenames, PosixPath):
filenames = [filenames]
# Get file dates and times that were read in to the dataset
filenames.sort()
for f in filenames:
f = Path(f).name
pts = re.match(r'(^[a-zA-Z0-9]+)\.([0-9a-z]{2})\.([\d]{8})\.([\d]{6})\.([a-z]{2,3}$)', f)
# If Not ARM format, read in first time for info
if pts is not None:
pts = pts.groups()
file_dates.append(pts[2])
file_times.append(pts[3])
else:
if ds['time'].size > 1:
dummy = ds['time'].values[0]
else:
dummy = ds['time'].values
file_dates.append(utils.numpy_to_arm_date(dummy))
file_times.append(utils.numpy_to_arm_date(dummy, returnTime=True))
# Add attributes
ds.attrs['_file_dates'] = file_dates
ds.attrs['_file_times'] = file_times
is_arm_file_flag = check_arm_standards(ds)
# Ensure that we have _datastream set whether or not there's
# a datastream attribute already.
if is_arm_file_flag == 0:
ds.attrs['_datastream'] = DEFAULT_DATASTREAM_NAME
else:
ds.attrs['_datastream'] = ds.attrs['datastream']
ds.attrs['_arm_standards_flag'] = is_arm_file_flag
if cleanup_qc:
ds.clean.cleanup()
if cleanup_temp_directory:
cleanup_files(files=filenames)
return ds
def keep_variables_to_drop_variables(
filenames,
keep_variables,
drop_variables=None):
"""
Returns a list of variable names to exclude from reading by passing into
`Xarray.open_dataset` drop_variables keyword. This can greatly help reduce
loading time and disk space use of the Dataset.
When passed a netCDF file name, will open the file using the netCDF4 library to get
list of variable names. There is less overhead reading the variable names using
netCDF4 library than Xarray. If more than one filename is provided or string is
used for shell syntax globbing, will use the first file in the list.
Parameters
----------
filenames : str, pathlib.PosixPath or list of str
Name of file(s) to read.
keep_variables : str or list of str
Variable names desired to keep. Do not need to list associated dimension
names. These will be automatically kept as well.
drop_variables : str or list of str
Variable names to explicitly add to returned list. May be helpful if a variable
exists in a file that is not in the first file in the list.
Returns
-------
drop_vars : list of str
Variable names to exclude from returned Dataset by using drop_variables keyword
when calling Xarray.open_dataset().
Examples
--------
.. code-block :: python
import act
filename = '/data/datastream/hou/houkasacrcfrM1.a1/houkasacrcfrM1.a1.20220404.*.nc'
drop_vars = act.io.armfiles.keep_variables_to_drop_variables(
filename, ['lat','lon','alt','crosspolar_differential_phase'],
drop_variables='variable_name_that_only_exists_in_last_file_of_the_day')
"""
read_variables = []
return_variables = []
if isinstance(keep_variables, str):
keep_variables = [keep_variables]
if isinstance(drop_variables, str):
drop_variables = [drop_variables]
# If filenames is a list subset to first file name.
if isinstance(filenames, (list, tuple)):
filename = filenames[0]
# If filenames is a string, check if it needs to be expanded in shell
# first. Then use first returned file name. Else use the string filename.
elif isinstance(filenames, str):
filename = glob.glob(filenames)
if len(filename) == 0:
return return_variables
else:
filename.sort()
filename = filename[0]
# Use netCDF4 library to extract the variable and dimension names.
rootgrp = Dataset(filename, 'r')
read_variables = list(rootgrp.variables)
dimensions = list(rootgrp.dimensions)
# Loop over the variables to exclude needed coordinate dimension names.
dims_to_keep = []
for var_name in keep_variables:
try:
dims_to_keep.extend(list(rootgrp[var_name].dimensions))
except IndexError:
pass
rootgrp.close()
# Remove names not matching keep_variables, excluding the associated coordinate dimensions
return_variables = set(read_variables) - set(keep_variables) - set(dims_to_keep)
# Add drop_variables to list
if drop_variables is not None:
return_variables = set(return_variables) | set(drop_variables)
return list(return_variables)
def check_arm_standards(ds):
"""
Checks to see if an xarray dataset conforms to ARM standards.
Parameters
----------
ds : Xarray Dataset
The dataset to check.
Returns
-------
flag : int
The flag corresponding to whether or not the file conforms
to ARM standards. Bit packed, so 0 for no, 1 for yes
"""
the_flag = 1 << 0
if 'datastream' not in ds.attrs.keys():
the_flag = 0
# Check if the historical global attribute name is
# used instead of updated name of 'datastream'. If so
# correct the global attributes and flip flag.
if 'zeb_platform' in ds.attrs.keys():
ds.attrs['datastream'] = copy.copy(ds.attrs['zeb_platform'])
del ds.attrs['zeb_platform']
the_flag = 1 << 0
return the_flag
def create_ds_from_arm_dod(proc, set_dims, version='', fill_value=-9999.0, scalar_fill_dim=None, local_file=False):
"""
Queries the ARM DOD api and builds a dataset based on the ARM DOD and
the dimension sizes that are passed in.
Parameters
----------
proc : string
Process to create the dataset off of. This is normally in the
format of inst.level. i.e. vdis.b1 or kazrge.a1. If local file
is true, this points to the path of the .dod file.
set_dims : dict
Dictionary of dims from the DOD and the corresponding sizes.
Time is required. Code will try and pull from DOD, unless set
through this variable
Note: names need to match exactly what is in the dod
i.e. {'drop_diameter': 50, 'time': 1440}
version : string
Version number of the ingest to use. If not set, defaults to
latest version
fill_value : float
Fill value for non-dimension variables. Dimensions cannot have
duplicate values and are incrementally set (0, 1, 2)
scalar_fill_dim : str
Depending on how the dataset is set up, sometimes the scalar values
are dimensioned to the main dimension. i.e. a lat/lon is set to have
a dimension of time. This is a way to set it up similarly.
local_file: bool
If true, the DOD will be loaded from a file whose name is proc.
If false, the DOD will be pulled from PCM.
Returns
-------
ds : xarray.Dataset
ACT Xarray dataset populated with all variables and attributes.
Examples
--------
.. code-block :: python
dims = {'time': 1440, 'drop_diameter': 50}
ds = act.io.armfiles.create_ds_from_arm_dod(
'vdis.b1', dims, version='1.2', scalar_fill_dim='time')
"""
# Set base url to get DOD information
if local_file is False:
base_url = 'https://pcm.arm.gov/pcm/api/dods/'
# Get data from DOD api
with urllib.request.urlopen(base_url + proc) as url:
data = json.loads(url.read().decode())
else:
with open(proc) as file:
data = json.loads(file.read())
# Check version numbers and alert if requested version is not available
keys = list(data['versions'].keys())
if version not in keys:
warnings.warn(
' '.join(
['Version:', version, 'not available or not specified. Using Version:', keys[-1]]
),
UserWarning,
)
version = keys[-1]
# Create empty xarray dataset
ds = xr.Dataset()
# Get the global attributes and add to dataset
atts = {}
for a in data['versions'][version]['atts']:
if a['name'] == 'string':
continue
if a['value'] is None:
a['value'] = ''
atts[a['name']] = a['value']
ds.attrs = atts
# Get variable information and create dataarrays that are
# then added to the dataset
# If not passed in through set_dims, will look to the DOD
# if not set in the DOD, then will raise error
variables = data['versions'][version]['vars']
dod_dims = data['versions'][version]['dims']
for d in dod_dims:
if d['name'] not in list(set_dims.keys()):
if d['length'] > 0:
set_dims[d['name']] = d['length']
else:
raise ValueError(
'Dimension length not set in DOD for '
+ d['name']
+ ', nor passed in through set_dim'
)
for v in variables:
dims = v['dims']
dim_shape = []
# Using provided dimension data, fill array accordingly for easy overwrite
if len(dims) == 0:
if scalar_fill_dim is None:
data_na = fill_value
else:
data_na = np.full(set_dims[scalar_fill_dim], fill_value)
v['dims'] = scalar_fill_dim
else:
for d in dims:
dim_shape.append(set_dims[d])
if len(dim_shape) == 1 and v['name'] == dims[0]:
data_na = np.arange(dim_shape[0])
else:
data_na = np.full(dim_shape, fill_value)
# Get attribute information. Had to do some things to get to print to netcdf
atts = {}
str_flag = False
for a in v['atts']:
if a['name'] == 'string':
str_flag = True
continue
if a['value'] is None:
continue
if str_flag and a['name'] == 'units':
continue
atts[a['name']] = a['value']
da = xr.DataArray(data=data_na, dims=v['dims'], name=v['name'], attrs=atts)
ds[v['name']] = da
return ds
@xr.register_dataset_accessor('write')
class WriteDataset:
"""
Class for cleaning up Dataset before writing to file.
"""
def __init__(self, xarray_ds):
self._ds = xarray_ds
def write_netcdf(
self,
cleanup_global_atts=True,
cleanup_qc_atts=True,
join_char='__',
make_copy=True,
cf_compliant=False,
delete_global_attrs=['qc_standards_version', 'qc_method', 'qc_comment'],
FillValue=-9999,
cf_convention='CF-1.8',
**kwargs,
):
"""
This is a wrapper around Dataset.to_netcdf to clean up the Dataset before
writing to disk. Some things are added to global attributes during ACT reading
process, and QC variables attributes are modified during QC cleanup process.
This will modify before writing to disk to better
match Climate & Forecast standards.
Parameters
----------
cleanup_global_atts : boolean
Option to cleanup global attributes by removing any global attribute
that starts with an underscore.
cleanup_qc_atts : boolean
Option to convert attributes that would be written as string array
to be a single character string. CF 1.7 does not allow string array attributes.
Will use a single space as a delimiter between values and join_char to replace
white space between words.
join_char : str
The character string to use for replacing white spaces between words when converting
a list of strings to single character string attributes.
make_copy : boolean
Make a copy before modifying Dataset to write. For large Datasets this
may add processing time and memory. If modifying the Dataset is OK
try setting to False.
cf_compliant : boolean
Option to output file with additional attributes to make file Climate & Forecast
compliant. May require running the .clean.cleanup() method on the dataset to fix other
issues first. This does the best it can, but the result may not be truly compliant. You
should read the CF documents and try to make the dataset compliant before writing to file.
delete_global_attrs : list
Optional global attributes to be deleted. Defaults to some standard
QC attributes that are not needed. Can add more or set to None to not
remove the attributes.
FillValue : int, float
The value to use as a _FillValue in output file. This is used to fix
issues with how Xarray handles missing_value upon reading. It's confusing
so not a perfect fix. Set to None to leave Xarray to do what it wants.
Set to a value to be the value used as _FillValue in the file and data
array. This should then remove missing_value attribute from the file as well.
cf_convention : str
The Climate and Forecast convention string to add to Conventions attribute.
**kwargs : keywords
Keywords to pass through to Dataset.to_netcdf()
Examples
--------
.. code-block :: python
ds.write.write_netcdf(path='output.nc')
"""
if make_copy:
write_ds = copy.deepcopy(self._ds)
else:
write_ds = self._ds
encoding = {}
if cleanup_global_atts:
for attr in list(write_ds.attrs):
if attr.startswith('_'):
del write_ds.attrs[attr]
if cleanup_qc_atts:
check_atts = ['flag_meanings', 'flag_assessments']
for var_name in list(write_ds.data_vars):
if 'standard_name' not in write_ds[var_name].attrs.keys():
continue
for attr_name in check_atts:
try:
att_values = write_ds[var_name].attrs[attr_name]
if isinstance(att_values, (list, tuple)):
att_values = [att_value.replace(' ', join_char) for att_value in att_values]
write_ds[var_name].attrs[attr_name] = ' '.join(att_values)
except KeyError:
pass
# Tell .to_netcdf() to not add a _FillValue attribute for
# quality control variables.
if FillValue is not None:
encoding[var_name] = {'_FillValue': None}
# Clean up _FillValue vs missing_value mess by creating an
# encoding dictionary with each variable's _FillValue set to
# requested fill value. May need to improve upon this for data type
# and other issues in the future.
if FillValue is not None:
skip_variables = ['base_time', 'time_offset', 'qc_time'] + list(encoding.keys())
for var_name in list(write_ds.data_vars):
if var_name not in skip_variables:
encoding[var_name] = {'_FillValue': FillValue}
if delete_global_attrs is not None:
for attr in delete_global_attrs:
try:
del write_ds.attrs[attr]
except KeyError:
pass
for var_name in list(write_ds.keys()):
if 'string' in list(write_ds[var_name].attrs.keys()):
att = write_ds[var_name].attrs['string']
write_ds[var_name].attrs[var_name + '_string'] = att
del write_ds[var_name].attrs['string']
# If requested update global attributes and variables attributes for required
# CF attributes.
if cf_compliant:
# Get variable names and standard name for each variable
var_names = list(write_ds.keys())
standard_names = []
for var_name in var_names:
try:
standard_names.append(write_ds[var_name].attrs['standard_name'])
except KeyError:
standard_names.append(None)
# Check if time variable has axis and standard_name attribute
coord_name = 'time'
try:
write_ds[coord_name].attrs['axis']
except KeyError:
try:
write_ds[coord_name].attrs['axis'] = 'T'
except KeyError:
pass
try:
write_ds[coord_name].attrs['standard_name']
except KeyError:
try:
write_ds[coord_name].attrs['standard_name'] = 'time'
except KeyError:
pass
# Try to determine type of dataset by coordinate dimension named time
# and other factors
try:
write_ds.attrs['FeatureType']
except KeyError:
dim_names = list(write_ds.dims)
FeatureType = None
if dim_names == ['time']:
FeatureType = 'timeSeries'
elif len(dim_names) == 2 and 'time' in dim_names and 'bound' in dim_names:
FeatureType = 'timeSeries'
elif len(dim_names) >= 2 and 'time' in dim_names:
for var_name in var_names:
dims = list(write_ds[var_name].dims)
if len(dims) == 2 and 'time' in dims:
prof_dim = list(set(dims) - {'time'})[0]
if write_ds[prof_dim].values.size > 2:
FeatureType = 'timeSeriesProfile'
break
if FeatureType is not None:
write_ds.attrs['FeatureType'] = FeatureType
# Add axis and positive attributes to variables with standard_name
# equal to 'altitude'
alt_variables = [
var_names[ii] for ii, sn in enumerate(standard_names) if sn == 'altitude'
]
for var_name in alt_variables:
try:
write_ds[var_name].attrs['axis']
except KeyError:
write_ds[var_name].attrs['axis'] = 'Z'
try:
write_ds[var_name].attrs['positive']
except KeyError:
write_ds[var_name].attrs['positive'] = 'up'
# Check if the Conventions global attribute lists the CF convention
try:
Conventions = write_ds.attrs['Conventions']
Conventions = Conventions.split()
cf_listed = False
for ii in Conventions:
if ii.startswith('CF-'):
cf_listed = True
break
if not cf_listed:
Conventions.append(cf_convention)
write_ds.attrs['Conventions'] = ' '.join(Conventions)
except KeyError:
write_ds.attrs['Conventions'] = str(cf_convention)
# Reorder global attributes to ensure history is last
try:
history = copy.copy(write_ds.attrs['history'])
del write_ds.attrs['history']
write_ds.attrs['history'] = history
except KeyError:
pass
current_time = dt.datetime.now().replace(microsecond=0)
if 'history' in list(write_ds.attrs.keys()):
write_ds.attrs['history'] += ''.join(['\n', str(current_time), ' created by ACT ', str(act.__version__),
' act.io.write.write_netcdf'])
if hasattr(write_ds, 'time_bounds') and not write_ds.time.encoding:
write_ds.time.encoding.update(write_ds.time_bounds.encoding)
write_ds.to_netcdf(encoding=encoding, **kwargs)
def check_if_tar_gz_file(filenames):
"""
Checks if the filenames point to gunzip and/or TAR files and, if so, unpacks
them to a temporary directory, returning the paths to the extracted files.
Parameters
----------
filenames : str, pathlib.Path
Filenames to check if gunzip and/or tar files.
Returns
-------
filenames : Paths to extracted files from gunzip or TAR files
"""
cleanup = False
if isinstance(filenames, (str, PathLike)):
try:
if is_gunzip_file(filenames) or tarfile.is_tarfile(str(filenames)):
tmpdirname = tempfile.mkdtemp()
cleanup = True
if is_gunzip_file(filenames):
filenames = unpack_gzip(filenames, write_directory=tmpdirname)
if tarfile.is_tarfile(str(filenames)):
filenames = unpack_tar(filenames, write_directory=tmpdirname, randomize=False)
except Exception:
pass
return filenames, cleanup
def read_mmcr(filenames):
"""
Reads in ARM MMCR files and splits up the variables into specific
mode variables based on what's in the files. MMCR files have the modes
interleaved and are not readable using xarray so some modifications are
needed ahead of time.
Parameters
----------
filenames : str, pathlib.PosixPath or list of str
Name of file(s) to read.
Returns
-------
ds : xarray.Dataset (or None)
ACT Xarray dataset (or None if no data file(s) found).
"""
# Sort the files to make sure they concatenate right
filenames.sort()
# Run through each file and read it in using netCDF4, then
# read it in with xarray
multi_ds = []
for f in filenames:
nc = Dataset(f, "a")
# Change heights name to range to read appropriately to xarray
if 'heights' in nc.dimensions:
nc.renameDimension('heights', 'range')
if nc is not None:
ds = xr.open_dataset(xr.backends.NetCDF4DataStore(nc))
multi_ds.append(ds)
# Concatenate datasets together
if len(multi_ds) > 1:
ds = xr.concat(multi_ds, dim='time')
else:
ds = multi_ds[0]
# Get modes and ranges with time/height modes
modes = ds['mode'].values
mode_vars = []
for v in ds:
if 'range' in ds[v].dims and 'time' in ds[v].dims and len(ds[v].dims) == 2:
mode_vars.append(v)
# For each mode, run extract data variables if available
# saves as individual variables in the file.
for m in modes:
if len(ds['ModeDescription'].shape) > 1:
mode_desc = ds['ModeDescription'].values[0, m]
if np.isnan(ds['heights'].values[0, m, :]).all():
continue
range_data = ds['heights'].values[0, m, :]
else:
mode_desc = ds['ModeDescription'].values[m]
if np.isnan(ds['heights'].values[m, :]).all():
continue
range_data = ds['heights'].values[m, :]
mode_desc = str(mode_desc).split('_')[-1][0:-1]
mode_desc = str(mode_desc).split('\'')[0]
idx = np.where(ds['ModeNum'].values == m)[0]
idy = np.where(~np.isnan(range_data))[0]
for v in mode_vars:
new_var_name = v + '_' + mode_desc
time_name = 'time_' + mode_desc
range_name = 'range_' + mode_desc
data = ds[v].values[idx, :]
data = data[:, idy]
attrs = ds[v].attrs
da = xr.DataArray(
data=data,
coords={time_name: ds['time'].values[idx], range_name: range_data[idy]},
dims=[time_name, range_name],
attrs=attrs
)
ds[new_var_name] = da
return ds
<|code_end|>
|
act/io/armfiles.py
<|code_start|>"""
This module contains I/O operations for loading files that were created for the
Atmospheric Radiation Measurement program supported by the Department of Energy
Office of Science.
"""
import copy
import datetime as dt
import glob
import json
import re
import tarfile
import tempfile
import urllib
import warnings
from os import PathLike
from pathlib import Path, PosixPath
import numpy as np
import xarray as xr
from cftime import num2date
from netCDF4 import Dataset
import act
import act.utils as utils
from act.config import DEFAULT_DATASTREAM_NAME
from act.utils.io_utils import cleanup_files, is_gunzip_file, unpack_gzip, unpack_tar
def read_netcdf(
filenames,
concat_dim=None,
return_None=False,
combine='by_coords',
decode_times=True,
use_cftime=True,
use_base_time=False,
combine_attrs='override',
cleanup_qc=False,
keep_variables=None,
**kwargs,
):
"""
Returns `xarray.Dataset` with stored data and metadata from a user-defined
query of ARM-standard netCDF files from a single datastream. Has some procedures
to ensure time is correctly formatted in returned Dataset.
Parameters
----------
filenames : str, pathlib.PosixPath, list of str, list of pathlib.PosixPath
Name of file(s) to read.
concat_dim : str
Dimension to concatenate files along.
return_None : boolean
Catch IOError exception when file not found and return None.
Default is False.
combine : str
String used by xarray.open_mfdataset() to determine how to combine
data files into one Dataset. See Xarray documentation for options.
decode_times : boolean
Standard Xarray option to decode time values from int/float to python datetime values.
Appears the default is to do this anyway but need this option to allow correct usage
of use_base_time.
use_cftime : boolean
Option to use cftime library to parse the time units string and correctly
establish the time values with a units string containing timezone offset.
This is used because the Pandas units string parser does not correctly recognize
time zone offset. Code will automatically detect cftime object and convert to datetime64
in returned Dataset.
use_base_time : boolean
Option to use ARM time variables base_time and time_offset. Useful when the time variable
is not included (older files) or when the units attribute is incorrectly formatted. Will use
the values of base_time and time_offset as seconds since epoch and create datetime64 values
for time coordinate. If set will change decode_times and use_cftime to False.
combine_attrs : str
String indicating how to combine attrs of the datasets being merged
cleanup_qc : boolean
Call clean.cleanup() method to convert to standardized ancillary quality control
variables. This will not allow any keyword options, so if non-default behavior is
desired will need to call clean.cleanup() method on the dataset after reading the data.
keep_variables : str or list of str
Variable names to read from data file. Works by creating a list of variable names
to exclude from reading and passing into open_mfdataset() via drop_variables keyword.
Still allows use of drop_variables keyword for variables not listed in first file to
read.
**kwargs : keywords
Keywords to pass through to xarray.open_mfdataset().
Returns
-------
ds : xarray.Dataset (or None)
ACT Xarray dataset (or None if no data file(s) found).
Examples
--------
This example will load the example sounding data used for unit testing.
.. code-block :: python
import act
ds = act.io.armfiles.read_netcdf(act.tests.sample_files.EXAMPLE_SONDE_WILDCARD)
print(ds)
"""
message = 'act.io.armfiles.read_netcdf will be replaced in version 2.0.0 by act.io.arm.read_arm_netcdf()'
warnings.warn(message, DeprecationWarning, 2)
ds = None
filenames, cleanup_temp_directory = check_if_tar_gz_file(filenames)
file_dates = []
file_times = []
# If requested to use base_time and time_offset, set keywords to correct attribute values
# to pass into xarray open_mfdataset(). Need to turn off decode_times and use_cftime
# or else will try to convert base_time and time_offset. Depending on values of attributes
# may cause a failure.
if use_base_time:
decode_times = False
use_cftime = False
# Add function keywords to kwargs dictionary for passing into open_mfdataset.
kwargs['combine'] = combine
kwargs['concat_dim'] = concat_dim
kwargs['decode_times'] = decode_times
kwargs['use_cftime'] = use_cftime
if len(filenames) > 1 and not isinstance(filenames, str):
kwargs['combine_attrs'] = combine_attrs
# Check if keep_variables is set. If so determine correct drop_variables
if keep_variables is not None:
drop_variables = None
if 'drop_variables' in kwargs.keys():
drop_variables = kwargs['drop_variables']
kwargs['drop_variables'] = keep_variables_to_drop_variables(
filenames, keep_variables, drop_variables=drop_variables
)
# Create an exception tuple to use with try statements. Doing it this way
# so we can add the FileNotFoundError if requested. Can add more error
# handling in the future.
except_tuple = (ValueError,)
if return_None:
except_tuple = except_tuple + (FileNotFoundError, OSError)
try:
# Read data file with Xarray function
ds = xr.open_mfdataset(filenames, **kwargs)
except except_tuple as exception:
# If requested return None for File not found error
if type(exception).__name__ == 'FileNotFoundError':
return None
# If requested return None for File not found error
if type(exception).__name__ == 'OSError' and exception.args[0] == 'no files to open':
return None
# Look at error message and see if could be nested error message. If so
# update combine keyword and try again. This should allow reading files
# without a time variable but base_time and time_offset variables.
if (
kwargs['combine'] != 'nested'
and type(exception).__name__ == 'ValueError'
and exception.args[0] == 'Could not find any dimension coordinates '
'to use to order the datasets for concatenation'
):
kwargs['combine'] = 'nested'
ds = xr.open_mfdataset(filenames, **kwargs)
else:
# When all else fails raise the original exception
raise exception
# If requested use base_time and time_offset to derive time. Assumes that the units
# of both are in seconds and that the value is number of seconds since epoch.
if use_base_time:
time = num2date(
ds['base_time'].values + ds['time_offset'].values, ds['base_time'].attrs['units']
)
time = time.astype('datetime64[ns]')
# Need to use a new Dataset creation to correctly index time for use with
# .group and .resample methods in Xarray Datasets.
temp_ds = xr.Dataset({'time': (ds['time'].dims, time, ds['time'].attrs)})
ds['time'] = temp_ds['time']
del temp_ds
for att_name in ['units', 'ancillary_variables']:
try:
del ds['time'].attrs[att_name]
except KeyError:
pass
# Xarray has issues reading a CF formatted time units string if it contains
# timezone offset without a [+|-] preceding the timezone offset.
# https://github.com/pydata/xarray/issues/3644
# To ensure the times are read in correctly need to set use_cftime=True.
# This will read in time as cftime object. But Xarray uses numpy datetime64
# natively. This will convert the cftime time values to numpy datetime64.
desired_time_precision = 'datetime64[ns]'
for var_name in ['time', 'time_offset']:
try:
if 'time' in ds.dims and type(ds[var_name].values[0]).__module__.startswith('cftime.'):
# If we just convert time to datetime64 the group, sel, and other Xarray
# methods will not work correctly because time is not indexed. Need to
# use the formation of a Dataset to correctly set the time indexing.
temp_ds = xr.Dataset(
{
var_name: (
ds[var_name].dims,
ds[var_name].values.astype(desired_time_precision),
ds[var_name].attrs,
)
}
)
ds[var_name] = temp_ds[var_name]
del temp_ds
# If time_offset is in file try to convert base_time as well
if var_name == 'time_offset':
ds['base_time'].values = ds['base_time'].values.astype(desired_time_precision)
ds['base_time'] = ds['base_time'].astype(desired_time_precision)
except KeyError:
pass
# Check if "time" variable is not in the netCDF file. If so try to use
# base_time and time_offset to make time variable. Basically a fix for incorrectly
# formatted files. May require using decode_times=False to initially read the data.
if 'time' in ds.dims and not np.issubdtype(ds['time'].dtype, np.datetime64):
try:
ds['time'] = ds['time_offset']
except (KeyError, ValueError):
pass
# Adding support for wildcards
if isinstance(filenames, str):
filenames = glob.glob(filenames)
elif isinstance(filenames, PosixPath):
filenames = [filenames]
# Get file dates and times that were read in to the dataset
filenames.sort()
for f in filenames:
f = Path(f).name
pts = re.match(r'(^[a-zA-Z0-9]+)\.([0-9a-z]{2})\.([\d]{8})\.([\d]{6})\.([a-z]{2,3}$)', f)
# If Not ARM format, read in first time for info
if pts is not None:
pts = pts.groups()
file_dates.append(pts[2])
file_times.append(pts[3])
else:
if ds['time'].size > 1:
dummy = ds['time'].values[0]
else:
dummy = ds['time'].values
file_dates.append(utils.numpy_to_arm_date(dummy))
file_times.append(utils.numpy_to_arm_date(dummy, returnTime=True))
# Add attributes
ds.attrs['_file_dates'] = file_dates
ds.attrs['_file_times'] = file_times
is_arm_file_flag = check_arm_standards(ds)
    # Ensure that we have _datastream set whether or not there's
    # a datastream attribute already.
if is_arm_file_flag == 0:
ds.attrs['_datastream'] = DEFAULT_DATASTREAM_NAME
else:
ds.attrs['_datastream'] = ds.attrs['datastream']
ds.attrs['_arm_standards_flag'] = is_arm_file_flag
if cleanup_qc:
ds.clean.cleanup()
if cleanup_temp_directory:
cleanup_files(files=filenames)
return ds
def keep_variables_to_drop_variables(filenames, keep_variables, drop_variables=None):
"""
Returns a list of variable names to exclude from reading by passing into
`Xarray.open_dataset` drop_variables keyword. This can greatly help reduce
loading time and disk space use of the Dataset.
When passed a netCDF file name, will open the file using the netCDF4 library to get
    list of variable names. There is less overhead reading the variable names using
netCDF4 library than Xarray. If more than one filename is provided or string is
used for shell syntax globbing, will use the first file in the list.
Parameters
----------
filenames : str, pathlib.PosixPath or list of str
Name of file(s) to read.
keep_variables : str or list of str
        Variable names desired to keep. Do not need to list associated dimension
names. These will be automatically kept as well.
drop_variables : str or list of str
Variable names to explicitly add to returned list. May be helpful if a variable
exists in a file that is not in the first file in the list.
Returns
-------
drop_vars : list of str
Variable names to exclude from returned Dataset by using drop_variables keyword
when calling Xarray.open_dataset().
Examples
--------
.. code-block :: python
import act
filename = '/data/datastream/hou/houkasacrcfrM1.a1/houkasacrcfrM1.a1.20220404.*.nc'
drop_vars = act.io.armfiles.keep_variables_to_drop_variables(
filename, ['lat','lon','alt','crosspolar_differential_phase'],
drop_variables='variable_name_that_only_exists_in_last_file_of_the_day')
"""
read_variables = []
return_variables = []
if isinstance(keep_variables, str):
keep_variables = [keep_variables]
if isinstance(drop_variables, str):
drop_variables = [drop_variables]
# If filenames is a list subset to first file name.
if isinstance(filenames, (list, tuple)):
filename = filenames[0]
# If filenames is a string, check if it needs to be expanded in shell
# first. Then use first returned file name. Else use the string filename.
elif isinstance(filenames, str):
filename = glob.glob(filenames)
if len(filename) == 0:
return return_variables
else:
filename.sort()
filename = filename[0]
# Use netCDF4 library to extract the variable and dimension names.
rootgrp = Dataset(filename, 'r')
read_variables = list(rootgrp.variables)
    # Loop over the variables to exclude needed coordinate dimension names.
dims_to_keep = []
for var_name in keep_variables:
try:
dims_to_keep.extend(list(rootgrp[var_name].dimensions))
except IndexError:
pass
rootgrp.close()
    # Remove names not matching keep_variables, excluding the associated coordinate dimensions.
return_variables = set(read_variables) - set(keep_variables) - set(dims_to_keep)
# Add drop_variables to list
if drop_variables is not None:
return_variables = set(return_variables) | set(drop_variables)
return list(return_variables)
def check_arm_standards(ds):
"""
Checks to see if an xarray dataset conforms to ARM standards.
Parameters
----------
ds : Xarray Dataset
The dataset to check.
Returns
-------
flag : int
The flag corresponding to whether or not the file conforms
to ARM standards. Bit packed, so 0 for no, 1 for yes
"""
the_flag = 1 << 0
if 'datastream' not in ds.attrs.keys():
the_flag = 0
# Check if the historical global attribute name is
# used instead of updated name of 'datastream'. If so
# correct the global attributes and flip flag.
if 'zeb_platform' in ds.attrs.keys():
ds.attrs['datastream'] = copy.copy(ds.attrs['zeb_platform'])
del ds.attrs['zeb_platform']
the_flag = 1 << 0
return the_flag
def create_ds_from_arm_dod(
proc, set_dims, version='', fill_value=-9999.0, scalar_fill_dim=None, local_file=False
):
"""
Queries the ARM DOD api and builds a dataset based on the ARM DOD and
the dimension sizes that are passed in.
Parameters
----------
proc : string
Process to create the dataset off of. This is normally in the
format of inst.level. i.e. vdis.b1 or kazrge.a1. If local file
is true, this points to the path of the .dod file.
set_dims : dict
Dictionary of dims from the DOD and the corresponding sizes.
Time is required. Code will try and pull from DOD, unless set
through this variable
Note: names need to match exactly what is in the dod
i.e. {'drop_diameter': 50, 'time': 1440}
version : string
Version number of the ingest to use. If not set, defaults to
latest version
fill_value : float
Fill value for non-dimension variables. Dimensions cannot have
duplicate values and are incrementally set (0, 1, 2)
scalar_fill_dim : str
Depending on how the dataset is set up, sometimes the scalar values
are dimensioned to the main dimension. i.e. a lat/lon is set to have
a dimension of time. This is a way to set it up similarly.
local_file: bool
If true, the DOD will be loaded from a file whose name is proc.
If false, the DOD will be pulled from PCM.
Returns
-------
ds : xarray.Dataset
ACT Xarray dataset populated with all variables and attributes.
Examples
--------
.. code-block :: python
dims = {'time': 1440, 'drop_diameter': 50}
ds = act.io.armfiles.create_ds_from_arm_dod(
'vdis.b1', dims, version='1.2', scalar_fill_dim='time')
"""
# Set base url to get DOD information
if local_file is False:
base_url = 'https://pcm.arm.gov/pcm/api/dods/'
# Get data from DOD api
with urllib.request.urlopen(base_url + proc) as url:
data = json.loads(url.read().decode())
else:
with open(proc) as file:
data = json.loads(file.read())
# Check version numbers and alert if requested version in not available
keys = list(data['versions'].keys())
if version not in keys:
warnings.warn(
' '.join(
['Version:', version, 'not available or not specified. Using Version:', keys[-1]]
),
UserWarning,
)
version = keys[-1]
# Create empty xarray dataset
ds = xr.Dataset()
# Get the global attributes and add to dataset
atts = {}
for a in data['versions'][version]['atts']:
if a['name'] == 'string':
continue
if a['value'] is None:
a['value'] = ''
atts[a['name']] = a['value']
ds.attrs = atts
# Get variable information and create dataarrays that are
# then added to the dataset
# If not passed in through set_dims, will look to the DOD
# if not set in the DOD, then will raise error
variables = data['versions'][version]['vars']
dod_dims = data['versions'][version]['dims']
for d in dod_dims:
if d['name'] not in list(set_dims.keys()):
if d['length'] > 0:
set_dims[d['name']] = d['length']
else:
raise ValueError(
'Dimension length not set in DOD for '
+ d['name']
+ ', nor passed in through set_dim'
)
for v in variables:
dims = v['dims']
dim_shape = []
# Using provided dimension data, fill array accordingly for easy overwrite
if len(dims) == 0:
if scalar_fill_dim is None:
data_na = fill_value
else:
data_na = np.full(set_dims[scalar_fill_dim], fill_value)
v['dims'] = scalar_fill_dim
else:
for d in dims:
dim_shape.append(set_dims[d])
if len(dim_shape) == 1 and v['name'] == dims[0]:
data_na = np.arange(dim_shape[0])
else:
data_na = np.full(dim_shape, fill_value)
# Get attribute information. Had to do some things to get to print to netcdf
atts = {}
str_flag = False
for a in v['atts']:
if a['name'] == 'string':
str_flag = True
continue
if a['value'] is None:
continue
if str_flag and a['name'] == 'units':
continue
atts[a['name']] = a['value']
da = xr.DataArray(data=data_na, dims=v['dims'], name=v['name'], attrs=atts)
ds[v['name']] = da
return ds
@xr.register_dataset_accessor('write')
class WriteDataset:
"""
Class for cleaning up Dataset before writing to file.
"""
def __init__(self, xarray_ds):
self._ds = xarray_ds
def write_netcdf(
self,
cleanup_global_atts=True,
cleanup_qc_atts=True,
join_char='__',
make_copy=True,
cf_compliant=False,
delete_global_attrs=['qc_standards_version', 'qc_method', 'qc_comment'],
FillValue=-9999,
cf_convention='CF-1.8',
**kwargs,
):
"""
This is a wrapper around Dataset.to_netcdf to clean up the Dataset before
writing to disk. Some things are added to global attributes during ACT reading
process, and QC variables attributes are modified during QC cleanup process.
This will modify before writing to disk to better
match Climate & Forecast standards.
Parameters
----------
cleanup_global_atts : boolean
Option to cleanup global attributes by removing any global attribute
that starts with an underscore.
cleanup_qc_atts : boolean
Option to convert attributes that would be written as string array
            to be a single character string. CF 1.7 does not allow string array attributes.
            Will use a single space as a delimiter between values and join_char to replace
white space between words.
join_char : str
            The character string to use for replacing white spaces between words when converting
a list of strings to single character string attributes.
make_copy : boolean
Make a copy before modifying Dataset to write. For large Datasets this
may add processing time and memory. If modifying the Dataset is OK
try setting to False.
cf_compliant : boolean
Option to output file with additional attributes to make file Climate & Forecast
            compliant. May require running the .clean.cleanup() method on the dataset to fix other
            issues first. This does the best it can, but it may not be truly compliant. You
            should read the CF documents and try to make the file compliant before writing.
delete_global_attrs : list
Optional global attributes to be deleted. Defaults to some standard
QC attributes that are not needed. Can add more or set to None to not
remove the attributes.
FillValue : int, float
The value to use as a _FillValue in output file. This is used to fix
issues with how Xarray handles missing_value upon reading. It's confusing
so not a perfect fix. Set to None to leave Xarray to do what it wants.
Set to a value to be the value used as _FillValue in the file and data
array. This should then remove missing_value attribute from the file as well.
cf_convention : str
The Climate and Forecast convention string to add to Conventions attribute.
**kwargs : keywords
Keywords to pass through to Dataset.to_netcdf()
Examples
--------
.. code-block :: python
ds.write.write_netcdf(path='output.nc')
"""
if make_copy:
write_ds = copy.deepcopy(self._ds)
else:
write_ds = self._ds
encoding = {}
if cleanup_global_atts:
for attr in list(write_ds.attrs):
if attr.startswith('_'):
del write_ds.attrs[attr]
if cleanup_qc_atts:
check_atts = ['flag_meanings', 'flag_assessments']
for var_name in list(write_ds.data_vars):
if 'standard_name' not in write_ds[var_name].attrs.keys():
continue
for attr_name in check_atts:
try:
att_values = write_ds[var_name].attrs[attr_name]
if isinstance(att_values, (list, tuple)):
att_values = [
att_value.replace(' ', join_char) for att_value in att_values
]
write_ds[var_name].attrs[attr_name] = ' '.join(att_values)
except KeyError:
pass
# Tell .to_netcdf() to not add a _FillValue attribute for
# quality control variables.
if FillValue is not None:
encoding[var_name] = {'_FillValue': None}
# Clean up _FillValue vs missing_value mess by creating an
# encoding dictionary with each variable's _FillValue set to
# requested fill value. May need to improve upon this for data type
# and other issues in the future.
if FillValue is not None:
skip_variables = ['base_time', 'time_offset', 'qc_time'] + list(encoding.keys())
for var_name in list(write_ds.data_vars):
if var_name not in skip_variables:
encoding[var_name] = {'_FillValue': FillValue}
if delete_global_attrs is not None:
for attr in delete_global_attrs:
try:
del write_ds.attrs[attr]
except KeyError:
pass
for var_name in list(write_ds.keys()):
if 'string' in list(write_ds[var_name].attrs.keys()):
att = write_ds[var_name].attrs['string']
write_ds[var_name].attrs[var_name + '_string'] = att
del write_ds[var_name].attrs['string']
# If requested update global attributes and variables attributes for required
# CF attributes.
if cf_compliant:
# Get variable names and standard name for each variable
var_names = list(write_ds.keys())
standard_names = []
for var_name in var_names:
try:
standard_names.append(write_ds[var_name].attrs['standard_name'])
except KeyError:
standard_names.append(None)
            # Check if the time variable has axis and standard_name attributes
coord_name = 'time'
try:
write_ds[coord_name].attrs['axis']
except KeyError:
try:
write_ds[coord_name].attrs['axis'] = 'T'
except KeyError:
pass
try:
write_ds[coord_name].attrs['standard_name']
except KeyError:
try:
write_ds[coord_name].attrs['standard_name'] = 'time'
except KeyError:
pass
            # Try to determine the type of dataset by the coordinate dimension named time
            # and other factors
try:
write_ds.attrs['FeatureType']
except KeyError:
dim_names = list(write_ds.dims)
FeatureType = None
if dim_names == ['time']:
FeatureType = 'timeSeries'
elif len(dim_names) == 2 and 'time' in dim_names and 'bound' in dim_names:
FeatureType = 'timeSeries'
elif len(dim_names) >= 2 and 'time' in dim_names:
for var_name in var_names:
dims = list(write_ds[var_name].dims)
if len(dims) == 2 and 'time' in dims:
prof_dim = list(set(dims) - {'time'})[0]
if write_ds[prof_dim].values.size > 2:
FeatureType = 'timeSeriesProfile'
break
if FeatureType is not None:
write_ds.attrs['FeatureType'] = FeatureType
# Add axis and positive attributes to variables with standard_name
# equal to 'altitude'
alt_variables = [
var_names[ii] for ii, sn in enumerate(standard_names) if sn == 'altitude'
]
for var_name in alt_variables:
try:
write_ds[var_name].attrs['axis']
except KeyError:
write_ds[var_name].attrs['axis'] = 'Z'
try:
write_ds[var_name].attrs['positive']
except KeyError:
write_ds[var_name].attrs['positive'] = 'up'
# Check if the Conventions global attribute lists the CF convention
try:
Conventions = write_ds.attrs['Conventions']
Conventions = Conventions.split()
cf_listed = False
for ii in Conventions:
if ii.startswith('CF-'):
cf_listed = True
break
if not cf_listed:
Conventions.append(cf_convention)
write_ds.attrs['Conventions'] = ' '.join(Conventions)
except KeyError:
write_ds.attrs['Conventions'] = str(cf_convention)
# Reorder global attributes to ensure history is last
try:
history = copy.copy(write_ds.attrs['history'])
del write_ds.attrs['history']
write_ds.attrs['history'] = history
except KeyError:
pass
current_time = dt.datetime.now().replace(microsecond=0)
if 'history' in list(write_ds.attrs.keys()):
write_ds.attrs['history'] += ''.join(
[
'\n',
str(current_time),
' created by ACT ',
str(act.__version__),
' act.io.write.write_netcdf',
]
)
if hasattr(write_ds, 'time_bounds') and not write_ds.time.encoding:
write_ds.time.encoding.update(write_ds.time_bounds.encoding)
write_ds.to_netcdf(encoding=encoding, **kwargs)
def check_if_tar_gz_file(filenames):
"""
    Checks if files are gunzip and/or TAR files and, if so, unpacks the contents
    to a temporary directory and returns the extracted file names.
Parameters
----------
filenames : str, pathlib.Path
Filenames to check if gunzip and/or tar files.
Returns
-------
    filenames : str, pathlib.Path or list
        Paths to the extracted files, or the original filenames if no unpacking was needed.
    cleanup : bool
        Flag indicating whether a temporary directory was created and should be cleaned up.
"""
cleanup = False
if isinstance(filenames, (str, PathLike)):
try:
if is_gunzip_file(filenames) or tarfile.is_tarfile(str(filenames)):
tmpdirname = tempfile.mkdtemp()
cleanup = True
if is_gunzip_file(filenames):
filenames = unpack_gzip(filenames, write_directory=tmpdirname)
if tarfile.is_tarfile(str(filenames)):
filenames = unpack_tar(filenames, write_directory=tmpdirname, randomize=False)
except Exception:
pass
return filenames, cleanup
def read_mmcr(filenames):
"""
Reads in ARM MMCR files and splits up the variables into specific
mode variables based on what's in the files. MMCR files have the modes
interleaved and are not readable using xarray so some modifications are
needed ahead of time.
Parameters
----------
filenames : str, pathlib.PosixPath or list of str
Name of file(s) to read.
Returns
-------
ds : xarray.Dataset (or None)
ACT Xarray dataset (or None if no data file(s) found).
"""
# Sort the files to make sure they concatenate right
filenames.sort()
# Run through each file and read it in using netCDF4, then
# read it in with xarray
multi_ds = []
for f in filenames:
nc = Dataset(f, 'a')
# Change heights name to range to read appropriately to xarray
if 'heights' in nc.dimensions:
nc.renameDimension('heights', 'range')
if nc is not None:
ds = xr.open_dataset(xr.backends.NetCDF4DataStore(nc))
multi_ds.append(ds)
# Concatenate datasets together
if len(multi_ds) > 1:
ds = xr.concat(multi_ds, dim='time')
else:
ds = multi_ds[0]
    # Get modes and ranges with time/height modes
modes = ds['mode'].values
mode_vars = []
for v in ds:
if 'range' in ds[v].dims and 'time' in ds[v].dims and len(ds[v].dims) == 2:
mode_vars.append(v)
# For each mode, run extract data variables if available
# saves as individual variables in the file.
for m in modes:
if len(ds['ModeDescription'].shape) > 1:
mode_desc = ds['ModeDescription'].values[0, m]
if np.isnan(ds['heights'].values[0, m, :]).all():
continue
range_data = ds['heights'].values[0, m, :]
else:
mode_desc = ds['ModeDescription'].values[m]
if np.isnan(ds['heights'].values[m, :]).all():
continue
range_data = ds['heights'].values[m, :]
mode_desc = str(mode_desc).split('_')[-1][0:-1]
mode_desc = str(mode_desc).split('\'')[0]
idx = np.where(ds['ModeNum'].values == m)[0]
idy = np.where(~np.isnan(range_data))[0]
for v in mode_vars:
new_var_name = v + '_' + mode_desc
time_name = 'time_' + mode_desc
range_name = 'range_' + mode_desc
data = ds[v].values[idx, :]
data = data[:, idy]
attrs = ds[v].attrs
da = xr.DataArray(
data=data,
coords={time_name: ds['time'].values[idx], range_name: range_data[idy]},
dims=[time_name, range_name],
attrs=attrs,
)
ds[new_var_name] = da
return ds
<|code_end|>
|
Error while using add_dqr_to_qc: flag_values and flag_masks not set as expected
* ACT version: 1.5.3
* Python version: 3.11.4
* Operating System: windows
Trying to add DQRs into QC variable for 'lv_e' (latent flux) within the 30ecor datastream from SGP.
fluxdata = act.io.armfiles.read_netcdf('sgp30ecorE2.b1.20050622.000000.cdf', keep_variables=['lv_e','wind_dir','qc_lv_e'])
fluxdata = act.qc.arm.add_dqr_to_qc(fluxdata, variable=['lv_e'])
Using 'act.qc.arm.add_dqr_to_qc' on sgp30ecorE2.b1.20050622.000000.cdf or sgp30ecorE6.b1.20040705.000000.cdf. The latter is associated with the following DQR: https://adc.arm.gov//ArchiveServices/DQRService?dqrid=D050524.3
Using 'cleanup_qc=True' did not help. Neither did running clean.cleanup() after reading the file. Error is provided below
Update: Updated the package from 1.5.1 to 1.5.3 and that did not fix the original issue. Now also getting an additional error : ConnectionError: ('Connection aborted.', RemoteDisconnected('Remote end closed connection without response'))
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
File ~\AppData\Local\anaconda3\Lib\site-packages\act\qc\qcfilter.py:658, in QCFilter.available_bit(self, qc_var_name, recycle)
657 try:
--> 658 flag_masks = self._ds[qc_var_name].attrs['flag_masks']
659 flag_value = False
KeyError: 'flag_masks'
During handling of the above exception, another exception occurred:
KeyError Traceback (most recent call last)
File ~\AppData\Local\anaconda3\Lib\site-packages\act\qc\qcfilter.py:662, in QCFilter.available_bit(self, qc_var_name, recycle)
661 try:
--> 662 flag_masks = self._ds[qc_var_name].attrs['flag_values']
663 flag_value = True
KeyError: 'flag_values'
During handling of the above exception, another exception occurred:
KeyError Traceback (most recent call last)
File ~\AppData\Local\anaconda3\Lib\site-packages\act\qc\qcfilter.py:666, in QCFilter.available_bit(self, qc_var_name, recycle)
665 try:
--> 666 self._ds[qc_var_name].attrs['flag_values']
667 flag_masks = self._ds[qc_var_name].attrs['flag_masks']
KeyError: 'flag_values'
During handling of the above exception, another exception occurred:
ValueError Traceback (most recent call last)
Cell In[118], line 60
57 fluxdata = act.io.armfiles.read_netcdf('sgp30ecor'+site[0:3]+'.b1/sgp30ecor'+site[0:3]+'.b1.'+date+'.000000.cdf')
59 # Add DQRs to QC variable
---> 60 fluxdata = act.qc.arm.add_dqr_to_qc(fluxdata, variable=['lv_e'])
61 wdir=fluxdata.wind_dir.where(fluxdata.qc_wind_dir==0)
62 latent_flux_1=fluxdata.lv_e.where(fluxdata.qc_lv_e==0)
File ~\AppData\Local\anaconda3\Lib\site-packages\act\qc\arm.py:179, in add_dqr_to_qc(ds, variable, assessment, exclude, include, normalize_assessment, cleanup_qc, dqr_link, skip_location_vars)
177 for key, value in dqr_results.items():
178 try:
--> 179 ds.qcfilter.add_test(
180 var_name,
181 index=value['index'],
182 test_meaning=value['test_meaning'],
183 test_assessment=value['test_assessment'],
184 )
185 except IndexError:
186 print(f"Skipping '{var_name}' DQR application because of IndexError")
File ~\AppData\Local\anaconda3\Lib\site-packages\act\qc\qcfilter.py:345, in QCFilter.add_test(self, var_name, index, test_number, test_meaning, test_assessment, flag_value, recycle)
342 qc_var_name = self._ds.qcfilter.check_for_ancillary_qc(var_name, flag_type=flag_value)
344 if test_number is None:
--> 345 test_number = self._ds.qcfilter.available_bit(qc_var_name, recycle=recycle)
347 self._ds.qcfilter.set_test(var_name, index, test_number, flag_value)
349 if flag_value:
File ~\AppData\Local\anaconda3\Lib\site-packages\act\qc\qcfilter.py:670, in QCFilter.available_bit(self, qc_var_name, recycle)
668 flag_value = False
669 except KeyError:
--> 670 raise ValueError(
671 'Problem getting next value from '
672 'available_bit(). flag_values and '
673 'flag_masks not set as expected'
674 )
676 if flag_masks == []:
677 next_bit = 1
ValueError: Problem getting next value from available_bit(). flag_values and flag_masks not set as expected
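For reference, a minimal diagnostic sketch (not part of the original report; the empty-attribute workaround below is only an assumption to test, not a confirmed fix) that checks whether the ancillary QC variable is missing both flag_masks and flag_values before add_dqr_to_qc is called:
import act
# Read only the variables of interest and run the standard QC cleanup on read.
fluxdata = act.io.armfiles.read_netcdf('sgp30ecorE2.b1.20050622.000000.cdf', keep_variables=['lv_e', 'wind_dir', 'qc_lv_e'], cleanup_qc=True)
qc_attrs = fluxdata['qc_lv_e'].attrs
print('flag_masks:', qc_attrs.get('flag_masks'))
print('flag_values:', qc_attrs.get('flag_values'))
# If both attributes are missing, available_bit() raises the ValueError shown above.
# Initializing empty flag attributes is one possible workaround to try.
if 'flag_masks' not in qc_attrs and 'flag_values' not in qc_attrs:
    qc_attrs['flag_masks'] = []
    qc_attrs['flag_meanings'] = []
    qc_attrs['flag_assessments'] = []
fluxdata = act.qc.arm.add_dqr_to_qc(fluxdata, variable=['lv_e'])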
|
act/qc/clean.py
<|code_start|>"""
Class definitions for cleaning up QC variables to standard
cf-compliance.
"""
import copy
import re
import numpy as np
import xarray as xr
from act.qc.qcfilter import parse_bit
@xr.register_dataset_accessor('clean')
class CleanDataset:
"""
Class for cleaning up QC variables to standard cf-compliance
"""
def __init__(self, ds):
self._ds = ds
@property
def matched_qc_variables(self, check_arm_syntax=True):
"""
Find variables that are QC variables and return list of names.
Parameters
----------
check_arm_syntax : boolean
            ARM uses a standard of starting all quality control variables
with "qc" joined with an underscore. This is a more robust method
of getting the quality control variables before the standard_name
attribute is added. If this is true will first check using
attributes and will then check if variable starts with "qc".
Returns
-------
variables : list of str
A list of strings containing the name of each variable.
"""
# Will need to find all historical cases and add to list
description_list = [
'See global attributes for individual.+bit descriptions.',
(
'This field contains bit packed integer values, where each '
'bit represents a QC test on the data. Non-zero bits indicate '
'the QC condition given in the description for those bits; '
'a value of 0.+ indicates the data has not '
'failed any QC tests.'
),
(r'This field contains bit packed values which should be ' r'interpreted as listed..+'),
]
# Loop over each variable and look for a match to an attribute that
# would exist if the variable is a QC variable.
variables = []
for var in self._ds.data_vars:
try:
if self._ds[var].attrs['standard_name'] == 'quality_flag':
variables.append(var)
continue
except KeyError:
pass
if check_arm_syntax and var.startswith('qc_'):
variables.append(var)
continue
try:
for desc in description_list:
if re.match(desc, self._ds[var].attrs['description']) is not None:
variables.append(var)
break
except KeyError:
pass
variables = list(set(variables))
return variables
def cleanup(
self,
cleanup_arm_qc=True,
clean_arm_state_vars=None,
handle_missing_value=True,
link_qc_variables=True,
normalize_assessment=False,
cleanup_cf_qc=True,
**kwargs,
):
"""
Wrapper method to automatically call all the standard methods
for dataset cleanup.
Parameters
----------
cleanup_arm_qc : bool
Option to clean Xarray dataset from ARM QC to CF QC standards.
clean_arm_state_vars : list of str
Option to clean Xarray dataset state variables from ARM to CF
standards. Pass in list of variable names.
handle_missing_value : bool
            Go through variables and look for cases where a QC or state variable
            was converted to a float and missing values set to np.nan. This
            is done because of xarray's default to use mask_and_scale=True.
This will convert the data type back to integer and replace
any instances of np.nan to a missing value indicator (most
likely -9999).
link_qc_variables : bool
            Option to link QC variables through ancillary_variables if not
already set.
normalize_assessment : bool
Option to clean up assessments to use the same terminology. Set to
False for default because should only be an issue after adding DQRs
and the function to add DQRs calls this method.
**kwargs : keywords
Keyword arguments passed through to clean.clean_arm_qc
method.
Examples
--------
.. code-block:: python
files = act.tests.sample_files.EXAMPLE_MET1
ds = act.io.armfiles.read_netcdf(files)
ds.clean.cleanup()
"""
# Convert ARM QC to be more like CF state fields
if cleanup_arm_qc:
self._ds.clean.clean_arm_qc(**kwargs)
        # Convert ARM state fields to be more like CF state fields
if clean_arm_state_vars is not None:
self._ds.clean.clean_arm_state_variables(clean_arm_state_vars)
# Correctly convert data type because of missing value
# indicators in state and QC variables. Needs to be run after
# clean.clean_arm_qc to use CF attribute names.
if handle_missing_value:
self._ds.clean.handle_missing_values()
# Add some ancillary_variables linkages
# between data variable and QC variable
if link_qc_variables:
self._ds.clean.link_variables()
# Update the terminology used with flag_assessments to be consistent
if normalize_assessment:
self._ds.clean.normalize_assessment()
# Update from CF to standard used in ACT
if cleanup_cf_qc:
self._ds.clean.clean_cf_qc(**kwargs)
def handle_missing_values(self, default_missing_value=np.int32(-9999)):
"""
Correctly handle missing_value and _FillValue in the dataset.
xarray will automatically replace missing_value and
_FillValue in the data with NaN. This is great for data set
as type float but not great for int data. Can cause issues
with QC and state fields. This will loop through the array
looking for state and QC fields and revert them back to int
data type if upconverted to float to handle NaNs. Issue is that
xarray will convert data type to float if the attribute is defined
even if no data are set as missing value. xarray will also then
remove the missing_value or _FillValue variable attribute. This
will put the missing_value attribute back if needed.
Parameters
----------
default_missing_value : numpy int or float
The default missing value to use if a missing_value attribute
is not defined but one is needed.
"""
state_att_names = [
'flag_values',
'flag_meanings',
'flag_masks',
'flag_attributes',
]
# Look for variables that have 2 of the state_att_names defined
        # as attributes and are of type float. If so assume the variable
        # was incorrectly converted to float type.
for var in self._ds.data_vars:
var_att_names = self._ds[var].attrs.keys()
if len(set(state_att_names) & set(var_att_names)) >= 2 and self._ds[
var
].values.dtype in [
np.dtype('float16'),
np.dtype('float32'),
np.dtype('float64'),
]:
# Look at units variable to see if this is the stupid way some
# ARM products mix data and state variables. If the units are not
# in the normal list of unitless type assume this is a data variable
                # and skip. Another option is to look for a valid_range attribute
# and skip. This is commented out for now since the units check
# appears to be working.
try:
if self._ds[var].attrs['units'] not in ['1', 'unitless', '', ' ']:
continue
except KeyError:
pass
# Change any np.nan values to missing value indicator
data = self._ds[var].values
data[np.isnan(data)] = default_missing_value.astype(data.dtype)
# Convert data to match type of flag_mask or flag_values
# as the best guess of what type is correct.
found_dtype = False
for att_name in ['flag_masks', 'flag_values']:
try:
att_value = self._ds[var].attrs[att_name]
if isinstance(att_value, (list, tuple)):
dtype = att_value[0].dtype
elif isinstance(att_value, str):
dtype = default_missing_value.dtype
att_value = att_value.replace(',', ' ').split()
att_value = np.array(att_value, dtype=dtype)
self._ds[var].attrs[att_name] = att_value
dtype = default_missing_value.dtype
else:
dtype = att_value.dtype
data = data.astype(dtype)
found_dtype = True
break
except (KeyError, IndexError, AttributeError):
pass
# If flag_mask or flag_values is not available choose an int type
# and set data to that type.
if found_dtype is False:
data = data.astype(default_missing_value.dtype)
# Return data to the dataset and add missing value indicator
# attribute to variable.
self._ds[var].values = data
self._ds[var].attrs['missing_value'] = default_missing_value.astype(data.dtype)
def get_attr_info(self, variable=None, flag=False):
"""
Get ARM quality control definitions from the ARM standard
bit_#_description, ... attributes and return as dictionary.
Will attempt to guess if the flag is integer or bit packed
based on what attributes are set.
Parameters
----------
variable : str
Variable name to get attribute information. If set to None
will get global attributes.
flag : bool
Optional flag indicating if QC is expected to be bitpacked
or integer. Flag = True indicates integer QC. Default
is bitpacked or False.
Returns
-------
attributes dictionary : dict or None
            A dictionary containing the attribute information converted from
ARM QC to CF QC. All keys include 'flag_meanings', 'flag_masks',
'flag_values', 'flag_assessments', 'flag_tests', 'arm_attributes'.
Returns None if none found.
"""
string = 'bit'
if flag:
string = 'flag'
else:
found_string = False
try:
if self._ds.attrs['qc_bit_comment']:
string = 'bit'
found_string = True
except KeyError:
pass
if found_string is False:
try:
if self._ds.attrs['qc_flag_comment']:
string = 'flag'
found_string = True
except KeyError:
pass
if found_string is False:
var = self.matched_qc_variables
if len(var) > 0:
try:
if self._ds[variable].attrs['flag_method'] == 'integer':
string = 'flag'
found_string = True
del self._ds[variable].attrs['flag_method']
except KeyError:
pass
try:
if variable:
attr_description_pattern = r'(^' + string + r')_([0-9]+)_(description$)'
attr_assessment_pattern = r'(^' + string + r')_([0-9]+)_(assessment$)'
attr_comment_pattern = r'(^' + string + r')_([0-9]+)_(comment$)'
attributes = self._ds[variable].attrs
else:
attr_description_pattern = r'(^qc_' + string + r')_([0-9]+)_(description$)'
attr_assessment_pattern = r'(^qc_' + string + r')_([0-9]+)_(assessment$)'
attr_comment_pattern = r'(^qc_' + string + r')_([0-9]+)_(comment$)'
attributes = self._ds.attrs
except KeyError:
return None
assessment_bit_num = []
description_bit_num = []
comment_bit_num = []
flag_masks = []
flag_meanings = []
flag_assessments = []
flag_comments = []
arm_attributes = []
dtype = np.int32
for att_name in attributes:
try:
description = re.match(attr_description_pattern, att_name)
description_bit_num.append(int(description.groups()[1]))
flag_meanings.append(attributes[att_name])
arm_attributes.append(att_name)
except AttributeError:
pass
try:
assessment = re.match(attr_assessment_pattern, att_name)
assessment_bit_num.append(int(assessment.groups()[1]))
flag_assessments.append(attributes[att_name])
arm_attributes.append(att_name)
except AttributeError:
pass
try:
comment = re.match(attr_comment_pattern, att_name)
comment_bit_num.append(int(comment.groups()[1]))
flag_comments.append(attributes[att_name])
arm_attributes.append(att_name)
except AttributeError:
pass
if variable is not None:
# Try and get the data type from the variable if it is an integer
# If not an integer make the flag values integers.
try:
dtype = self._ds[variable].values.dtype
if np.issubdtype(dtype, np.integer):
pass
else:
dtype = np.int32
except AttributeError:
pass
# Sort on bit number to ensure correct description order
index = np.argsort(description_bit_num)
flag_meanings = np.array(flag_meanings)
description_bit_num = np.array(description_bit_num)
flag_meanings = flag_meanings[index]
description_bit_num = description_bit_num[index]
# Sort on bit number to ensure correct assessment order
if len(flag_assessments) > 0:
if len(flag_assessments) < len(flag_meanings):
for ii in range(1, len(flag_meanings) + 1):
if ii not in assessment_bit_num:
assessment_bit_num.append(ii)
flag_assessments.append('')
index = np.argsort(assessment_bit_num)
flag_assessments = np.array(flag_assessments)
flag_assessments = flag_assessments[index]
# Sort on bit number to ensure correct comment order
if len(flag_comments) > 0:
if len(flag_comments) < len(flag_meanings):
for ii in range(1, len(flag_meanings) + 1):
if ii not in comment_bit_num:
comment_bit_num.append(ii)
flag_comments.append('')
index = np.argsort(comment_bit_num)
flag_comments = np.array(flag_comments)
flag_comments = flag_comments[index]
# Convert bit number to mask number
if len(description_bit_num) > 0:
flag_masks = np.array(description_bit_num)
flag_masks = np.left_shift(1, flag_masks - 1)
# build dictionary to return values
if len(flag_masks) > 0 or len(description_bit_num) > 0:
return_dict = dict()
return_dict['flag_meanings'] = list(np.array(flag_meanings, dtype=str))
if len(flag_masks) > 0 and max(flag_masks) > np.iinfo(np.uint32).max:
flag_mask_dtype = np.uint64
else:
flag_mask_dtype = np.uint32
if flag:
return_dict['flag_values'] = list(np.array(description_bit_num, dtype=dtype))
return_dict['flag_masks'] = list(np.array([], dtype=flag_mask_dtype))
else:
return_dict['flag_values'] = list(np.array([], dtype=dtype))
return_dict['flag_masks'] = list(np.array(flag_masks, dtype=flag_mask_dtype))
return_dict['flag_assessments'] = list(np.array(flag_assessments, dtype=str))
return_dict['flag_tests'] = list(np.array(description_bit_num, dtype=dtype))
return_dict['flag_comments'] = list(np.array(flag_comments, dtype=str))
return_dict['arm_attributes'] = arm_attributes
else:
# If nothing to return set to None
return_dict = None
return return_dict
def clean_arm_state_variables(
self,
variables,
override_cf_flag=True,
clean_units_string=True,
integer_flag=True,
replace_in_flag_meanings=None,
):
"""
Function to clean up state variables to use more CF style.
Parameters
----------
variables : str or list of str
List of variable names to update.
override_cf_flag : bool
Option to overwrite CF flag_meanings attribute if it exists
with the values from ARM QC bit_#_description.
clean_units_string : bool
Option to update units string if set to 'unitless' to be
udunits compliant '1'.
integer_flag : bool
Pass through keyword of 'flag' for get_attr_info().
replace_in_flag_meanings : None or string
Character string to search and replace in each flag meanings array value
to increase readability since the flag_meanings stored in netCDF file
            is a single character array separated by a space character. Allows for
replacing things like "_" with space character.
"""
if isinstance(variables, str):
variables = [variables]
for var in variables:
flag_info = self.get_attr_info(variable=var, flag=integer_flag)
if flag_info is not None:
# Add new attributes to variable
for attr in ['flag_values', 'flag_meanings', 'flag_masks']:
if len(flag_info[attr]) > 0:
# Only add if attribute does not exist.
if attr in self._ds[var].attrs.keys() is False:
self._ds[var].attrs[attr] = copy.copy(flag_info[attr])
                        # If flag is set, set attribute even if it exists
elif override_cf_flag:
self._ds[var].attrs[attr] = copy.copy(flag_info[attr])
# Remove replaced attributes
arm_attributes = flag_info['arm_attributes']
for attr in arm_attributes:
try:
del self._ds[var].attrs[attr]
except KeyError:
pass
# Check if flag_meanings is string. If so convert to list.
try:
flag_meanings = copy.copy(self._ds[var].attrs['flag_meanings'])
if isinstance(flag_meanings, str):
flag_meanings = flag_meanings.split()
if replace_in_flag_meanings is not None:
for ii, flag_meaning in enumerate(flag_meanings):
flag_meaning = flag_meaning.replace(replace_in_flag_meanings, ' ')
flag_meanings[ii] = flag_meaning
self._ds[var].attrs['flag_meanings'] = flag_meanings
except KeyError:
pass
# Clean up units attribute from unitless to udunits '1'
try:
if clean_units_string and self._ds[var].attrs['units'] == 'unitless':
self._ds[var].attrs['units'] = '1'
except KeyError:
pass
def correct_valid_minmax(self, qc_variable):
"""
Function to correct the name and location of quality control limit
variables that use valid_min and valid_max incorrectly.
Parameters
----------
qc_variable : str
Name of quality control variable in the Xarray dataset to correct.
"""
test_dict = {
'valid_min': 'fail_min',
'valid_max': 'fail_max',
'valid_delta': 'fail_delta',
}
aa = re.match(r'^qc_(.+)', qc_variable)
variable = None
try:
variable = aa.groups()[0]
except AttributeError:
return
made_change = False
try:
flag_meanings = copy.copy(self._ds[qc_variable].attrs['flag_meanings'])
except KeyError:
return
for attr in test_dict.keys():
for ii, test in enumerate(flag_meanings):
if attr in test:
flag_meanings[ii] = re.sub(attr, test_dict[attr], test)
made_change = True
try:
self._ds[qc_variable].attrs[test_dict[attr]] = copy.copy(
self._ds[variable].attrs[attr]
)
del self._ds[variable].attrs[attr]
except KeyError:
pass
if made_change:
self._ds[qc_variable].attrs['flag_meanings'] = flag_meanings
def link_variables(self):
"""
Add some attributes to link and explain data
to QC data relationship. Will use non-CF standard_name
of quality_flag. Hopefully this will be added to the
standard_name table in the future.
"""
for var in self._ds.data_vars:
aa = re.match(r'^qc_(.+)', var)
try:
variable = aa.groups()[0]
qc_variable = var
except AttributeError:
continue
# Skip data quality fields.
try:
if not ('Quality check results on field:' in self._ds[var].attrs['long_name']):
continue
except KeyError:
pass
# Get existing data variable ancillary_variables attribute
try:
ancillary_variables = self._ds[variable].attrs['ancillary_variables']
except KeyError:
ancillary_variables = ''
# If the QC variable is not in ancillary_variables add
if qc_variable not in ancillary_variables:
ancillary_variables = qc_variable
self._ds[variable].attrs['ancillary_variables'] = copy.copy(ancillary_variables)
            # Check if QC variable has correct standard_name and if not fix it.
correct_standard_name = 'quality_flag'
try:
if self._ds[qc_variable].attrs['standard_name'] != correct_standard_name:
self._ds[qc_variable].attrs['standard_name'] = correct_standard_name
except KeyError:
self._ds[qc_variable].attrs['standard_name'] = correct_standard_name
def clean_arm_qc(
self,
override_cf_flag=True,
clean_units_string=True,
correct_valid_min_max=True,
remove_unset_global_tests=True,
**kwargs
):
"""
Method to clean up Xarray dataset QC variables.
Parameters
----------
override_cf_flag : bool
Option to overwrite CF flag_masks, flag_meanings, flag_values
if exists.
clean_units_string : bool
Option to clean up units string from 'unitless'
to udunits compliant '1'.
correct_valid_min_max : bool
Option to correct use of valid_min and valid_max with QC variables
            by moving from the data variable to the QC variable, renaming to fail_min,
            fail_max and fail_delta if the valid_min, valid_max or valid_delta
            is listed in the bit description attribute. If not listed as
used with QC will assume is being used correctly.
remove_unset_global_tests : bool
            Option to look for globally defined tests that are not set at the
variable level and remove from quality control variable.
"""
global_qc = self.get_attr_info()
for qc_var in self.matched_qc_variables:
# Clean up units attribute from unitless to udunits '1'
try:
if clean_units_string and self._ds[qc_var].attrs['units'] == 'unitless':
self._ds[qc_var].attrs['units'] = '1'
except KeyError:
pass
qc_attributes = self.get_attr_info(variable=qc_var)
if qc_attributes is None:
qc_attributes = global_qc
# Add new attributes to variable
for attr in [
'flag_masks',
'flag_meanings',
'flag_assessments',
'flag_values',
'flag_comments',
]:
if qc_attributes is not None and len(qc_attributes[attr]) > 0:
                    # Only add if the attribute does not exist
if attr in self._ds[qc_var].attrs.keys() is False:
self._ds[qc_var].attrs[attr] = copy.copy(qc_attributes[attr])
                    # If flag is set add attribute even if it already exists
elif override_cf_flag:
self._ds[qc_var].attrs[attr] = copy.copy(qc_attributes[attr])
# Remove replaced attributes
if qc_attributes is not None:
arm_attributes = qc_attributes['arm_attributes']
if 'description' not in arm_attributes:
arm_attributes.append('description')
if 'flag_method' not in arm_attributes:
arm_attributes.append('flag_method')
for attr in arm_attributes:
try:
del self._ds[qc_var].attrs[attr]
except KeyError:
pass
# Check for use of valid_min and valid_max as QC limits and fix
if correct_valid_min_max:
self._ds.clean.correct_valid_minmax(qc_var)
# Clean up global attributes
if global_qc is not None:
global_attributes = global_qc['arm_attributes']
global_attributes.extend(['qc_bit_comment'])
for attr in global_attributes:
try:
del self._ds.attrs[attr]
except KeyError:
pass
# If requested remove tests at variable level that were set from global level descriptions.
# This is assuming the test was only performed if the limit value is listed with the variable
# even if the global level describes the test.
if remove_unset_global_tests and global_qc is not None:
limit_name_list = ['fail_min', 'fail_max', 'fail_delta']
for qc_var_name in self.matched_qc_variables:
flag_meanings = self._ds[qc_var_name].attrs['flag_meanings']
flag_masks = self._ds[qc_var_name].attrs['flag_masks']
tests_to_remove = []
for ii, flag_meaning in enumerate(flag_meanings):
# Loop over usual test attribute names looking to see if they
# are listed in test description. If so use that name for look up.
test_attribute_limit_name = None
for name in limit_name_list:
if name in flag_meaning:
test_attribute_limit_name = name
break
if test_attribute_limit_name is None:
continue
remove_test = True
test_number = parse_bit(flag_masks[ii])[0]
for attr_name in self._ds[qc_var_name].attrs:
if test_attribute_limit_name == attr_name:
remove_test = False
break
index = self._ds.qcfilter.get_qc_test_mask(
qc_var_name=qc_var_name, test_number=test_number
)
if np.any(index):
remove_test = False
break
if remove_test:
tests_to_remove.append(test_number)
if len(tests_to_remove) > 0:
for test_to_remove in tests_to_remove:
self._ds.qcfilter.remove_test(
qc_var_name=qc_var_name, test_number=test_to_remove
)
def normalize_assessment(
self,
variables=None,
exclude_variables=None,
qc_lookup={'Incorrect': 'Bad', 'Suspect': 'Indeterminate'},
):
"""
Method to clean up assessment terms used to be consistent between
embedded QC and DQRs.
Parameters
----------
variables : str or list of str
Optional data variable names to check and normalize. If set to
None will check all variables.
exclude_variables : str or list of str
Optional data variable names to exclude from processing.
qc_lookup : dict
Optional dictionary used to convert between terms.
Examples
--------
.. code-block:: python
ds = act.io.armfiles.read_netcdf(files)
ds.clean.normalize_assessment(variables='temp_mean')
.. code-block:: python
ds = act.io.armfiles.read_netcdf(files, cleanup_qc=True)
ds.clean.normalize_assessment(qc_lookup={'Bad': 'Incorrect', 'Indeterminate': 'Suspect'})
"""
# Get list of variables if not provided
if variables is None:
variables = list(self._ds.data_vars)
# Ensure variables is a list
if not isinstance(variables, (list, tuple)):
variables = [variables]
# If exclude variables provided remove from variables list
if exclude_variables is not None:
if not isinstance(exclude_variables, (list, tuple)):
exclude_variables = [exclude_variables]
variables = list(set(variables) - set(exclude_variables))
# Loop over variables checking if a QC variable exits and use the
# lookup dictionary to convert the assessment terms.
for var_name in variables:
qc_var_name = self._ds.qcfilter.check_for_ancillary_qc(
var_name, add_if_missing=False, cleanup=False
)
if qc_var_name is not None:
try:
flag_assessments = self._ds[qc_var_name].attrs['flag_assessments']
except KeyError:
continue
for ii, assess in enumerate(flag_assessments):
try:
flag_assessments[ii] = qc_lookup[assess]
except KeyError:
continue
def clean_cf_qc(self, variables=None, sep='__', **kwargs):
"""
Method to convert the CF standard for QC attributes to match internal
format expected in the Dataset. CF does not allow string attribute
arrays, even though netCDF4 does allow string attribute arrays. The quality
control variables uses and expects lists for flag_meaning, flag_assessments.
Parameters
----------
variables : str or list of str or None
Data variable names to convert. If set to None will check all variables.
sep : str or None
Separater to use for splitting individual test meanings. Since the CF
attribute in the netCDF file must be a string and is separated by a
space character, individual test meanings are connected with a character.
Default for ACT writing to file is double underscore to preserve underscores
in variable or attribute names.
kwargs : dict
            Additional keyword arguments not used. This is to allow calling multiple
methods from one method without causing unexpected keyword errors.
Examples
--------
.. code-block:: python
ds = act.io.armfiles.read_netcdf(files)
ds.clean.clean_cf_qc(variables='temp_mean')
.. code-block:: python
ds = act.io.armfiles.read_netcdf(files, cleanup_qc=True)
"""
        # Convert string into a list of strings for iteration
if isinstance(variables, str):
variables = [variables]
# If no variables provided, get list of all variables in Dataset
if variables is None:
variables = list(self._ds.data_vars)
for var_name in variables:
# Check flag_meanings type. If string separate on space character
            # into list. If sep is not None split string on separator to make
# better looking list of strings.
try:
flag_meanings = self._ds[var_name].attrs['flag_meanings']
if isinstance(flag_meanings, str):
flag_meanings = flag_meanings.split()
if sep is not None:
flag_meanings = [ii.replace(sep, ' ') for ii in flag_meanings]
self._ds[var_name].attrs['flag_meanings'] = flag_meanings
except KeyError:
pass
# Check if flag_assessments is a string, split on space character
# to make list.
try:
flag_assessments = self._ds[var_name].attrs['flag_assessments']
if isinstance(flag_assessments, str):
flag_assessments = flag_assessments.split()
self._ds[var_name].attrs['flag_assessments'] = flag_assessments
except KeyError:
pass
# Check if flag_masks is a numpy scalar instead of array. If so convert
# to numpy array. If value is not numpy scalar, turn single value into
# list.
try:
flag_masks = self._ds[var_name].attrs['flag_masks']
if type(flag_masks).__module__ == 'numpy':
if flag_masks.shape == ():
self._ds[var_name].attrs['flag_masks'] = np.atleast_1d(flag_masks)
elif not isinstance(flag_masks, (list, tuple)):
self._ds[var_name].attrs['flag_masks'] = [flag_masks]
except KeyError:
pass
<|code_end|>
|
act/qc/clean.py
<|code_start|>"""
Class definitions for cleaning up QC variables to standard
cf-compliance.
"""
import copy
import re
import numpy as np
import xarray as xr
from act.qc.qcfilter import parse_bit
@xr.register_dataset_accessor('clean')
class CleanDataset:
"""
Class for cleaning up QC variables to standard cf-compliance
"""
def __init__(self, ds):
self._ds = ds
@property
def matched_qc_variables(self, check_arm_syntax=True):
"""
Find variables that are QC variables and return list of names.
Parameters
----------
check_arm_syntax : boolean
            ARM uses a standard of starting all quality control variables
with "qc" joined with an underscore. This is a more robust method
of getting the quality control variables before the standard_name
attribute is added. If this is true will first check using
attributes and will then check if variable starts with "qc".
Returns
-------
variables : list of str
A list of strings containing the name of each variable.
"""
# Will need to find all historical cases and add to list
description_list = [
'See global attributes for individual.+bit descriptions.',
(
'This field contains bit packed integer values, where each '
'bit represents a QC test on the data. Non-zero bits indicate '
'the QC condition given in the description for those bits; '
'a value of 0.+ indicates the data has not '
'failed any QC tests.'
),
(r'This field contains bit packed values which should be ' r'interpreted as listed..+'),
]
# Loop over each variable and look for a match to an attribute that
# would exist if the variable is a QC variable.
variables = []
for var in self._ds.data_vars:
try:
if self._ds[var].attrs['standard_name'] == 'quality_flag':
variables.append(var)
continue
except KeyError:
pass
if check_arm_syntax and var.startswith('qc_'):
variables.append(var)
continue
try:
for desc in description_list:
if re.match(desc, self._ds[var].attrs['description']) is not None:
variables.append(var)
break
except KeyError:
pass
variables = list(set(variables))
return variables
def cleanup(
self,
cleanup_arm_qc=True,
clean_arm_state_vars=None,
handle_missing_value=True,
link_qc_variables=True,
normalize_assessment=False,
cleanup_cf_qc=True,
**kwargs,
):
"""
Wrapper method to automatically call all the standard methods
for dataset cleanup.
Parameters
----------
cleanup_arm_qc : bool
Option to clean Xarray dataset from ARM QC to CF QC standards.
clean_arm_state_vars : list of str
Option to clean Xarray dataset state variables from ARM to CF
standards. Pass in list of variable names.
handle_missing_value : bool
            Go through variables and look for cases where a QC or state variable
            was converted to a float and missing values set to np.nan. This
            is done because of xarray's default to use mask_and_scale=True.
This will convert the data type back to integer and replace
any instances of np.nan to a missing value indicator (most
likely -9999).
link_qc_variables : bool
            Option to link QC variables through ancillary_variables if not
already set.
normalize_assessment : bool
Option to clean up assessments to use the same terminology. Set to
False for default because should only be an issue after adding DQRs
and the function to add DQRs calls this method.
**kwargs : keywords
Keyword arguments passed through to clean.clean_arm_qc
method.
Examples
--------
.. code-block:: python
files = act.tests.sample_files.EXAMPLE_MET1
ds = act.io.armfiles.read_netcdf(files)
ds.clean.cleanup()
"""
# Convert ARM QC to be more like CF state fields
if cleanup_arm_qc:
self._ds.clean.clean_arm_qc(**kwargs)
        # Convert ARM state fields to be more like CF state fields
if clean_arm_state_vars is not None:
self._ds.clean.clean_arm_state_variables(clean_arm_state_vars)
# Correctly convert data type because of missing value
# indicators in state and QC variables. Needs to be run after
# clean.clean_arm_qc to use CF attribute names.
if handle_missing_value:
self._ds.clean.handle_missing_values()
# Add some ancillary_variables linkages
# between data variable and QC variable
if link_qc_variables:
self._ds.clean.link_variables()
# Update the terminology used with flag_assessments to be consistent
if normalize_assessment:
self._ds.clean.normalize_assessment()
# Update from CF to standard used in ACT
if cleanup_cf_qc:
self._ds.clean.clean_cf_qc(**kwargs)
def handle_missing_values(self, default_missing_value=np.int32(-9999)):
"""
Correctly handle missing_value and _FillValue in the dataset.
xarray will automatically replace missing_value and
_FillValue in the data with NaN. This is great for data set
as type float but not great for int data. Can cause issues
with QC and state fields. This will loop through the array
looking for state and QC fields and revert them back to int
data type if upconverted to float to handle NaNs. Issue is that
xarray will convert data type to float if the attribute is defined
even if no data are set as missing value. xarray will also then
remove the missing_value or _FillValue variable attribute. This
will put the missing_value attribute back if needed.
Parameters
----------
default_missing_value : numpy int or float
The default missing value to use if a missing_value attribute
is not defined but one is needed.
"""
state_att_names = [
'flag_values',
'flag_meanings',
'flag_masks',
'flag_attributes',
]
# Look for variables that have 2 of the state_att_names defined
        # as attributes and are of type float. If so assume the variable
        # was incorrectly converted to float type.
for var in self._ds.data_vars:
var_att_names = self._ds[var].attrs.keys()
if len(set(state_att_names) & set(var_att_names)) >= 2 and self._ds[
var
].values.dtype in [
np.dtype('float16'),
np.dtype('float32'),
np.dtype('float64'),
]:
# Look at units variable to see if this is the stupid way some
# ARM products mix data and state variables. If the units are not
# in the normal list of unitless type assume this is a data variable
                # and skip. Other option is to look for a valid_range attribute
# and skip. This is commented out for now since the units check
# appears to be working.
try:
if self._ds[var].attrs['units'] not in ['1', 'unitless', '', ' ']:
continue
except KeyError:
pass
# Change any np.nan values to missing value indicator
data = self._ds[var].values
data[np.isnan(data)] = default_missing_value.astype(data.dtype)
# Convert data to match type of flag_mask or flag_values
# as the best guess of what type is correct.
found_dtype = False
for att_name in ['flag_masks', 'flag_values']:
try:
att_value = self._ds[var].attrs[att_name]
if isinstance(att_value, (list, tuple)):
dtype = att_value[0].dtype
elif isinstance(att_value, str):
dtype = default_missing_value.dtype
att_value = att_value.replace(',', ' ').split()
att_value = np.array(att_value, dtype=dtype)
self._ds[var].attrs[att_name] = att_value
dtype = default_missing_value.dtype
else:
dtype = att_value.dtype
data = data.astype(dtype)
found_dtype = True
break
except (KeyError, IndexError, AttributeError):
pass
# If flag_mask or flag_values is not available choose an int type
# and set data to that type.
if found_dtype is False:
data = data.astype(default_missing_value.dtype)
# Return data to the dataset and add missing value indicator
# attribute to variable.
self._ds[var].values = data
self._ds[var].attrs['missing_value'] = default_missing_value.astype(data.dtype)
def get_attr_info(self, variable=None, flag=False):
"""
Get ARM quality control definitions from the ARM standard
bit_#_description, ... attributes and return as dictionary.
Will attempt to guess if the flag is integer or bit packed
based on what attributes are set.
Parameters
----------
variable : str
Variable name to get attribute information. If set to None
will get global attributes.
flag : bool
Optional flag indicating if QC is expected to be bitpacked
or integer. Flag = True indicates integer QC. Default
is bitpacked or False.
Returns
-------
attributes dictionary : dict or None
            A dictionary containing the attribute information converted from
ARM QC to CF QC. All keys include 'flag_meanings', 'flag_masks',
'flag_values', 'flag_assessments', 'flag_tests', 'arm_attributes'.
Returns None if none found.
"""
string = 'bit'
if flag:
string = 'flag'
else:
found_string = False
try:
if self._ds.attrs['qc_bit_comment']:
string = 'bit'
found_string = True
except KeyError:
pass
if found_string is False:
try:
if self._ds.attrs['qc_flag_comment']:
string = 'flag'
found_string = True
except KeyError:
pass
if found_string is False:
var = self.matched_qc_variables
if len(var) > 0:
try:
if self._ds[variable].attrs['flag_method'] == 'integer':
string = 'flag'
found_string = True
del self._ds[variable].attrs['flag_method']
except KeyError:
pass
try:
if variable:
attr_description_pattern = r'(^' + string + r')_([0-9]+)_(description$)'
attr_assessment_pattern = r'(^' + string + r')_([0-9]+)_(assessment$)'
attr_comment_pattern = r'(^' + string + r')_([0-9]+)_(comment$)'
attributes = self._ds[variable].attrs
else:
attr_description_pattern = r'(^qc_' + string + r')_([0-9]+)_(description$)'
attr_assessment_pattern = r'(^qc_' + string + r')_([0-9]+)_(assessment$)'
attr_comment_pattern = r'(^qc_' + string + r')_([0-9]+)_(comment$)'
attributes = self._ds.attrs
except KeyError:
return None
assessment_bit_num = []
description_bit_num = []
comment_bit_num = []
flag_masks = []
flag_meanings = []
flag_assessments = []
flag_comments = []
arm_attributes = []
dtype = np.int32
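        # Walk the attribute names, matching the ARM bit/flag description,
        # assessment and comment patterns, and record each bit number so the
        # lists can be sorted into bit order below.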
for att_name in attributes:
try:
description = re.match(attr_description_pattern, att_name)
description_bit_num.append(int(description.groups()[1]))
flag_meanings.append(attributes[att_name])
arm_attributes.append(att_name)
except AttributeError:
pass
try:
assessment = re.match(attr_assessment_pattern, att_name)
assessment_bit_num.append(int(assessment.groups()[1]))
flag_assessments.append(attributes[att_name])
arm_attributes.append(att_name)
except AttributeError:
pass
try:
comment = re.match(attr_comment_pattern, att_name)
comment_bit_num.append(int(comment.groups()[1]))
flag_comments.append(attributes[att_name])
arm_attributes.append(att_name)
except AttributeError:
pass
if variable is not None:
# Try and get the data type from the variable if it is an integer
# If not an integer make the flag values integers.
try:
dtype = self._ds[variable].values.dtype
if np.issubdtype(dtype, np.integer):
pass
else:
dtype = np.int32
except AttributeError:
pass
# Sort on bit number to ensure correct description order
index = np.argsort(description_bit_num)
flag_meanings = np.array(flag_meanings)
description_bit_num = np.array(description_bit_num)
flag_meanings = flag_meanings[index]
description_bit_num = description_bit_num[index]
# Sort on bit number to ensure correct assessment order
if len(flag_assessments) > 0:
if len(flag_assessments) < len(flag_meanings):
for ii in range(1, len(flag_meanings) + 1):
if ii not in assessment_bit_num:
assessment_bit_num.append(ii)
flag_assessments.append('')
index = np.argsort(assessment_bit_num)
flag_assessments = np.array(flag_assessments)
flag_assessments = flag_assessments[index]
# Sort on bit number to ensure correct comment order
if len(flag_comments) > 0:
if len(flag_comments) < len(flag_meanings):
for ii in range(1, len(flag_meanings) + 1):
if ii not in comment_bit_num:
comment_bit_num.append(ii)
flag_comments.append('')
index = np.argsort(comment_bit_num)
flag_comments = np.array(flag_comments)
flag_comments = flag_comments[index]
# Convert bit number to mask number
if len(description_bit_num) > 0:
flag_masks = np.array(description_bit_num)
flag_masks = np.left_shift(1, flag_masks - 1)
# build dictionary to return values
if len(flag_masks) > 0 or len(description_bit_num) > 0:
return_dict = dict()
return_dict['flag_meanings'] = list(np.array(flag_meanings, dtype=str))
if len(flag_masks) > 0 and max(flag_masks) > np.iinfo(np.uint32).max:
flag_mask_dtype = np.uint64
else:
flag_mask_dtype = np.uint32
if flag:
return_dict['flag_values'] = list(np.array(description_bit_num, dtype=dtype))
return_dict['flag_masks'] = list(np.array([], dtype=flag_mask_dtype))
else:
return_dict['flag_values'] = list(np.array([], dtype=dtype))
return_dict['flag_masks'] = list(np.array(flag_masks, dtype=flag_mask_dtype))
return_dict['flag_assessments'] = list(np.array(flag_assessments, dtype=str))
return_dict['flag_tests'] = list(np.array(description_bit_num, dtype=dtype))
return_dict['flag_comments'] = list(np.array(flag_comments, dtype=str))
return_dict['arm_attributes'] = arm_attributes
else:
# If nothing to return set to None
return_dict = None
# If no QC is found but there's a Mentor_QC_Field_Information global attribute,
# hard code the information. This is for older ARM files that had QC information
# in this global attribute. For these cases, this should hold 100%
if return_dict is None and 'Mentor_QC_Field_Information' in self._ds.attrs:
qc_att = self._ds.attrs['Mentor_QC_Field_Information']
if 'Basic mentor QC checks' in qc_att:
if len(qc_att) == 920 or len(qc_att) == 1562:
return_dict = dict()
return_dict['flag_meanings'] = [
'Value is equal to missing_value.',
'Value is less than the valid_min.',
'Value is greater than the valid_max.',
'Difference between current and previous values exceeds valid_delta.'
]
return_dict['flag_tests'] = [1, 2, 3, 4]
return_dict['flag_masks'] = [1, 2, 4, 8]
return_dict['flag_assessments'] = ['Bad', 'Bad', 'Bad', 'Indeterminate']
return_dict['flag_values'] = []
return_dict['flag_comments'] = []
return_dict['arm_attributes'] = [
'bit_1_description',
'bit_1_assessment',
'bit_2_description',
'bit_2_assessment',
'bit_3_description',
'bit_3_assessment',
'bit_4_description',
'bit_4_assessment'
]
return return_dict
def clean_arm_state_variables(
self,
variables,
override_cf_flag=True,
clean_units_string=True,
integer_flag=True,
replace_in_flag_meanings=None,
):
"""
Function to clean up state variables to use more CF style.
Parameters
----------
variables : str or list of str
List of variable names to update.
override_cf_flag : bool
Option to overwrite CF flag_meanings attribute if it exists
with the values from ARM QC bit_#_description.
clean_units_string : bool
Option to update units string if set to 'unitless' to be
udunits compliant '1'.
integer_flag : bool
Pass through keyword of 'flag' for get_attr_info().
replace_in_flag_meanings : None or string
Character string to search and replace in each flag meanings array value
to increase readability since the flag_meanings stored in netCDF file
            is a single character array separated by space character. Allows for
replacing things like "_" with space character.
"""
if isinstance(variables, str):
variables = [variables]
for var in variables:
flag_info = self.get_attr_info(variable=var, flag=integer_flag)
if flag_info is not None:
# Add new attributes to variable
for attr in ['flag_values', 'flag_meanings', 'flag_masks']:
if len(flag_info[attr]) > 0:
# Only add if attribute does not exist.
                        if attr not in self._ds[var].attrs.keys():
self._ds[var].attrs[attr] = copy.copy(flag_info[attr])
                        # If flag is set, set attribute even if it exists
elif override_cf_flag:
self._ds[var].attrs[attr] = copy.copy(flag_info[attr])
# Remove replaced attributes
arm_attributes = flag_info['arm_attributes']
for attr in arm_attributes:
try:
del self._ds[var].attrs[attr]
except KeyError:
pass
# Check if flag_meanings is string. If so convert to list.
try:
flag_meanings = copy.copy(self._ds[var].attrs['flag_meanings'])
if isinstance(flag_meanings, str):
flag_meanings = flag_meanings.split()
if replace_in_flag_meanings is not None:
for ii, flag_meaning in enumerate(flag_meanings):
flag_meaning = flag_meaning.replace(replace_in_flag_meanings, ' ')
flag_meanings[ii] = flag_meaning
self._ds[var].attrs['flag_meanings'] = flag_meanings
except KeyError:
pass
# Clean up units attribute from unitless to udunits '1'
try:
if clean_units_string and self._ds[var].attrs['units'] == 'unitless':
self._ds[var].attrs['units'] = '1'
except KeyError:
pass
def correct_valid_minmax(self, qc_variable):
"""
Function to correct the name and location of quality control limit
variables that use valid_min and valid_max incorrectly.
Parameters
----------
qc_variable : str
Name of quality control variable in the Xarray dataset to correct.
"""
test_dict = {
'valid_min': 'fail_min',
'valid_max': 'fail_max',
'valid_delta': 'fail_delta',
}
aa = re.match(r'^qc_(.+)', qc_variable)
variable = None
try:
variable = aa.groups()[0]
except AttributeError:
return
made_change = False
try:
flag_meanings = copy.copy(self._ds[qc_variable].attrs['flag_meanings'])
except KeyError:
return
for attr in test_dict.keys():
for ii, test in enumerate(flag_meanings):
if attr in test:
flag_meanings[ii] = re.sub(attr, test_dict[attr], test)
made_change = True
try:
self._ds[qc_variable].attrs[test_dict[attr]] = copy.copy(
self._ds[variable].attrs[attr]
)
del self._ds[variable].attrs[attr]
except KeyError:
pass
if made_change:
self._ds[qc_variable].attrs['flag_meanings'] = flag_meanings
def link_variables(self):
"""
Add some attributes to link and explain data
to QC data relationship. Will use non-CF standard_name
of quality_flag. Hopefully this will be added to the
standard_name table in the future.
"""
for var in self._ds.data_vars:
aa = re.match(r'^qc_(.+)', var)
try:
variable = aa.groups()[0]
qc_variable = var
except AttributeError:
continue
# Skip data quality fields.
try:
if not ('Quality check results on field:' in self._ds[var].attrs['long_name']):
continue
except KeyError:
pass
# Get existing data variable ancillary_variables attribute
try:
ancillary_variables = self._ds[variable].attrs['ancillary_variables']
except KeyError:
ancillary_variables = ''
# If the QC variable is not in ancillary_variables add
if qc_variable not in ancillary_variables:
ancillary_variables = qc_variable
self._ds[variable].attrs['ancillary_variables'] = copy.copy(ancillary_variables)
# Check if QC variable has correct standard_name and iff not fix it.
correct_standard_name = 'quality_flag'
try:
if self._ds[qc_variable].attrs['standard_name'] != correct_standard_name:
self._ds[qc_variable].attrs['standard_name'] = correct_standard_name
except KeyError:
self._ds[qc_variable].attrs['standard_name'] = correct_standard_name
def clean_arm_qc(
self,
override_cf_flag=True,
clean_units_string=True,
correct_valid_min_max=True,
remove_unset_global_tests=True,
**kwargs
):
"""
Method to clean up Xarray dataset QC variables.
Parameters
----------
override_cf_flag : bool
Option to overwrite CF flag_masks, flag_meanings, flag_values
if exists.
clean_units_string : bool
Option to clean up units string from 'unitless'
to udunits compliant '1'.
correct_valid_min_max : bool
Option to correct use of valid_min and valid_max with QC variables
            by moving from data variable to QC variable, renaming to fail_min,
            fail_max and fail_delta if the valid_min, valid_max or valid_delta
            is listed in the bit description attribute. If not listed as
            used with QC, will assume it is being used correctly.
remove_unset_global_tests : bool
            Option to look for globally defined tests that are not set at the
variable level and remove from quality control variable.
"""
global_qc = self.get_attr_info()
for qc_var in self.matched_qc_variables:
# Clean up units attribute from unitless to udunits '1'
try:
if clean_units_string and self._ds[qc_var].attrs['units'] == 'unitless':
self._ds[qc_var].attrs['units'] = '1'
except KeyError:
pass
qc_attributes = self.get_attr_info(variable=qc_var)
if qc_attributes is None:
qc_attributes = global_qc
# Add new attributes to variable
for attr in [
'flag_masks',
'flag_meanings',
'flag_assessments',
'flag_values',
'flag_comments',
]:
if qc_attributes is not None and len(qc_attributes[attr]) > 0:
                    # Only add if attribute does not exist
                    if attr not in self._ds[qc_var].attrs.keys():
self._ds[qc_var].attrs[attr] = copy.copy(qc_attributes[attr])
                    # If flag is set add attribute even if it already exists
elif override_cf_flag:
self._ds[qc_var].attrs[attr] = copy.copy(qc_attributes[attr])
# Remove replaced attributes
if qc_attributes is not None:
arm_attributes = qc_attributes['arm_attributes']
if 'description' not in arm_attributes:
arm_attributes.append('description')
if 'flag_method' not in arm_attributes:
arm_attributes.append('flag_method')
for attr in arm_attributes:
try:
del self._ds[qc_var].attrs[attr]
except KeyError:
pass
# Check for use of valid_min and valid_max as QC limits and fix
if correct_valid_min_max:
self._ds.clean.correct_valid_minmax(qc_var)
# Clean up global attributes
if global_qc is not None:
global_attributes = global_qc['arm_attributes']
global_attributes.extend(['qc_bit_comment'])
for attr in global_attributes:
try:
del self._ds.attrs[attr]
except KeyError:
pass
# If requested remove tests at variable level that were set from global level descriptions.
# This is assuming the test was only performed if the limit value is listed with the variable
# even if the global level describes the test.
if remove_unset_global_tests and global_qc is not None:
limit_name_list = ['fail_min', 'fail_max', 'fail_delta']
for qc_var_name in self.matched_qc_variables:
flag_meanings = self._ds[qc_var_name].attrs['flag_meanings']
flag_masks = self._ds[qc_var_name].attrs['flag_masks']
tests_to_remove = []
for ii, flag_meaning in enumerate(flag_meanings):
# Loop over usual test attribute names looking to see if they
# are listed in test description. If so use that name for look up.
test_attribute_limit_name = None
for name in limit_name_list:
if name in flag_meaning:
test_attribute_limit_name = name
break
if test_attribute_limit_name is None:
continue
remove_test = True
test_number = parse_bit(flag_masks[ii])[0]
for attr_name in self._ds[qc_var_name].attrs:
if test_attribute_limit_name == attr_name:
remove_test = False
break
index = self._ds.qcfilter.get_qc_test_mask(
qc_var_name=qc_var_name, test_number=test_number
)
if np.any(index):
remove_test = False
break
if remove_test:
tests_to_remove.append(test_number)
if len(tests_to_remove) > 0:
for test_to_remove in tests_to_remove:
self._ds.qcfilter.remove_test(
qc_var_name=qc_var_name, test_number=test_to_remove
)
def normalize_assessment(
self,
variables=None,
exclude_variables=None,
qc_lookup={'Incorrect': 'Bad', 'Suspect': 'Indeterminate'},
):
"""
Method to clean up assessment terms used to be consistent between
embedded QC and DQRs.
Parameters
----------
variables : str or list of str
Optional data variable names to check and normalize. If set to
None will check all variables.
exclude_variables : str or list of str
Optional data variable names to exclude from processing.
qc_lookup : dict
Optional dictionary used to convert between terms.
Examples
--------
.. code-block:: python
ds = act.io.armfiles.read_netcdf(files)
ds.clean.normalize_assessment(variables='temp_mean')
.. code-block:: python
ds = act.io.armfiles.read_netcdf(files, cleanup_qc=True)
ds.clean.normalize_assessment(qc_lookup={'Bad': 'Incorrect', 'Indeterminate': 'Suspect'})
"""
# Get list of variables if not provided
if variables is None:
variables = list(self._ds.data_vars)
# Ensure variables is a list
if not isinstance(variables, (list, tuple)):
variables = [variables]
# If exclude variables provided remove from variables list
if exclude_variables is not None:
if not isinstance(exclude_variables, (list, tuple)):
exclude_variables = [exclude_variables]
variables = list(set(variables) - set(exclude_variables))
# Loop over variables checking if a QC variable exits and use the
# lookup dictionary to convert the assessment terms.
for var_name in variables:
qc_var_name = self._ds.qcfilter.check_for_ancillary_qc(
var_name, add_if_missing=False, cleanup=False
)
if qc_var_name is not None:
try:
flag_assessments = self._ds[qc_var_name].attrs['flag_assessments']
except KeyError:
continue
for ii, assess in enumerate(flag_assessments):
try:
flag_assessments[ii] = qc_lookup[assess]
except KeyError:
continue
def clean_cf_qc(self, variables=None, sep='__', **kwargs):
"""
Method to convert the CF standard for QC attributes to match internal
format expected in the Dataset. CF does not allow string attribute
arrays, even though netCDF4 does allow string attribute arrays. The quality
        control variables use and expect lists for flag_meanings and flag_assessments.
Parameters
----------
variables : str or list of str or None
Data variable names to convert. If set to None will check all variables.
sep : str or None
            Separator to use for splitting individual test meanings. Since the CF
attribute in the netCDF file must be a string and is separated by a
space character, individual test meanings are connected with a character.
Default for ACT writing to file is double underscore to preserve underscores
in variable or attribute names.
kwargs : dict
            Additional keyword arguments not used. This is to allow calling multiple
methods from one method without causing unexpected keyword errors.
Examples
--------
.. code-block:: python
ds = act.io.armfiles.read_netcdf(files)
ds.clean.clean_cf_qc(variables='temp_mean')
.. code-block:: python
ds = act.io.armfiles.read_netcdf(files, cleanup_qc=True)
"""
        # Convert string into a list of strings for iteration
if isinstance(variables, str):
variables = [variables]
# If no variables provided, get list of all variables in Dataset
if variables is None:
variables = list(self._ds.data_vars)
for var_name in variables:
# Check flag_meanings type. If string separate on space character
            # into list. If sep is not None split string on separator to make
# better looking list of strings.
try:
flag_meanings = self._ds[var_name].attrs['flag_meanings']
if isinstance(flag_meanings, str):
flag_meanings = flag_meanings.split()
if sep is not None:
flag_meanings = [ii.replace(sep, ' ') for ii in flag_meanings]
self._ds[var_name].attrs['flag_meanings'] = flag_meanings
except KeyError:
pass
# Check if flag_assessments is a string, split on space character
# to make list.
try:
flag_assessments = self._ds[var_name].attrs['flag_assessments']
if isinstance(flag_assessments, str):
flag_assessments = flag_assessments.split()
self._ds[var_name].attrs['flag_assessments'] = flag_assessments
except KeyError:
pass
# Check if flag_masks is a numpy scalar instead of array. If so convert
# to numpy array. If value is not numpy scalar, turn single value into
# list.
try:
flag_masks = self._ds[var_name].attrs['flag_masks']
if type(flag_masks).__module__ == 'numpy':
if flag_masks.shape == ():
self._ds[var_name].attrs['flag_masks'] = np.atleast_1d(flag_masks)
elif not isinstance(flag_masks, (list, tuple)):
self._ds[var_name].attrs['flag_masks'] = [flag_masks]
except KeyError:
pass
<|code_end|>
|
Sunset Stamen maps in GeoDisplay and potentially replace
Stamen is transitioning its maps to Stadia Maps at the end of October 2023. ACT will need to deprecate that feature in GeoDisplay and potentially look for replacements; a sketch of the img_tile-based path is below.
https://github.com/SciTools/cartopy/pull/2266
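A minimal sketch of the replacement path, assuming Cartopy is installed and using the img_tile keyword that geoplot() already accepts; the sample file and data_field names are illustrative only:

```python
import act

# Any dataset with 'lat'/'lon' variables works; the MET sample file is illustrative.
ds = act.io.armfiles.read_netcdf(act.tests.sample_files.EXAMPLE_MET1)

display = act.plotting.GeographicPlotDisplay(ds)
# Instead of the deprecated stamen keyword, name a cartopy img_tiles class.
# 'OSM' resolves to cartopy.io.img_tiles.OSM; Stamen styles hosted by Stadia
# would go through cartopy's StadiaMapsTiles class (cartopy >= 0.22) with an
# API key supplied via img_tile_args.
display.geoplot(data_field='temp_mean', img_tile='OSM', tile=8)
```

Because geoplot() already routes any img_tile name through getattr(img_tiles, img_tile), removing the stamen default would not require any other ACT API change.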
|
act/plotting/geodisplay.py
<|code_start|>"""
Stores the class for GeographicPlotDisplay.
"""
import warnings
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from .plot import Display
try:
import cartopy.crs as ccrs
import cartopy.feature as cfeature
from cartopy.io import img_tiles
CARTOPY_AVAILABLE = True
except ImportError:
CARTOPY_AVAILABLE = False
class GeographicPlotDisplay(Display):
"""
    A class for making geographic tracer plots of aircraft, ship or other
    moving platform data.
    This is inherited from the :func:`act.plotting.Display`
    class and therefore has the same attributes as that class.
See :func:`act.plotting.Display`
for more information. There are no additional attributes or parameters
to this class.
In order to create geographic plots, ACT needs the Cartopy package to be
installed on your system. More information about
    Cartopy can be found here: https://scitools.org.uk/cartopy/docs/latest/ .
"""
def __init__(self, ds, ds_name=None, **kwargs):
if not CARTOPY_AVAILABLE:
raise ImportError(
'Cartopy needs to be installed on your ' 'system to make geographic display plots.'
)
super().__init__(ds, ds_name, secondary_y_allowed=False, **kwargs)
if self.fig is None:
self.fig = plt.figure(**kwargs)
def geoplot(
self,
data_field=None,
lat_field='lat',
lon_field='lon',
dsname=None,
cbar_label=None,
title=None,
projection=None,
plot_buffer=0.08,
img_tile=None,
img_tile_args={},
tile=8,
stamen='terrain-background',
cartopy_feature=None,
cmap='rainbow',
text=None,
gridlines=True,
**kwargs,
):
"""
Creates a latitude and longitude plot of a time series data set with
data values indicated by color and described with a colorbar.
Latitude values must be in degree north (-90 to 90) and
longitude must be in degree east (-180 to 180).
Parameters
----------
data_field : str
Name of data field in the dataset to plot.
lat_field : str
Name of latitude field in the dataset to use.
lon_field : str
Name of longitude field in the dataset to use.
dsname : str or None
The name of the datastream to plot. Set to None to make ACT
attempt to automatically determine this.
cbar_label : str
Label to use with colorbar. If set to None will attempt
to create label from long_name and units.
title : str
Plot title.
projection : cartopy.crs object
            Projection to use on plot. See
https://scitools.org.uk/cartopy/docs/latest/reference/projections.html?highlight=projections
plot_buffer : float
Buffer to add around data on plot in lat and lon dimension.
img_tile : str
Image to use for the plot background. Set to None to not use
background image. For all image background types, see:
https://scitools.org.uk/cartopy/docs/v0.16/cartopy/io/img_tiles.html
Default is None.
img_tile_args : dict
Keyword arguments for the chosen img_tile. These arguments can be
found for the corresponding img_tile here:
https://scitools.org.uk/cartopy/docs/v0.16/cartopy/io/img_tiles.html
Default is an empty dictionary.
tile : int
Tile zoom to use with background image. Higher number indicates
more resolution. A value of 8 is typical for a normal sonde plot.
cartopy_feature : list of str or str
Cartopy feature to add to plot.
cmap : str
Color map to use for colorbar.
text : dictionary
Dictionary of {text:[lon,lat]} to add to plot. Can have more
than one set of text to add.
gridlines : boolean
Use latitude and longitude gridlines.
**kwargs : keyword arguments
Any other keyword arguments that will be passed
into :func:`matplotlib.pyplot.scatter` when the figure
is made. See the matplotlib documentation for further details
on what keyword arguments are available.
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the GeographicPlotDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
if data_field is None:
raise ValueError('You must enter the name of the data ' 'to be plotted.')
if projection is None:
if CARTOPY_AVAILABLE:
projection = ccrs.PlateCarree()
# Extract data from the dataset
try:
lat = self._ds[dsname][lat_field].values
except KeyError:
raise ValueError(
(
'You will need to provide the name of the '
"field if not '{}' to use for latitude "
'data.'
).format(lat_field)
)
try:
lon = self._ds[dsname][lon_field].values
except KeyError:
raise ValueError(
(
'You will need to provide the name of the '
"field if not '{}' to use for longitude "
'data.'
).format(lon_field)
)
# Set up metadata information for display on plot
if cbar_label is None:
try:
cbar_label = (
self._ds[dsname][data_field].attrs['long_name']
+ ' ('
+ self._ds[dsname][data_field].attrs['units']
+ ')'
)
except KeyError:
cbar_label = data_field
lat_limits = [np.nanmin(lat), np.nanmax(lat)]
lon_limits = [np.nanmin(lon), np.nanmax(lon)]
box_size = np.max([np.abs(np.diff(lat_limits)), np.abs(np.diff(lon_limits))])
bx_buf = box_size * plot_buffer
lat_center = np.sum(lat_limits) / 2.0
lon_center = np.sum(lon_limits) / 2.0
lat_limits = [
lat_center - box_size / 2.0 - bx_buf,
lat_center + box_size / 2.0 + bx_buf,
]
lon_limits = [
lon_center - box_size / 2.0 - bx_buf,
lon_center + box_size / 2.0 + bx_buf,
]
data = self._ds[dsname][data_field].values
# Create base plot projection
ax = plt.axes(projection=projection)
plt.subplots_adjust(left=0.01, right=0.99, bottom=0.05, top=0.93)
ax.set_extent([lon_limits[0], lon_limits[1], lat_limits[0], lat_limits[1]], crs=projection)
if title is None:
try:
dim = list(self._ds[dsname][data_field].dims)
ts = pd.to_datetime(str(self._ds[dsname][dim[0]].values[0]))
date = ts.strftime('%Y-%m-%d')
time_str = ts.strftime('%H:%M:%S')
plt.title(' '.join([dsname, 'at', date, time_str]))
except NameError:
plt.title(dsname)
else:
plt.title(title)
if stamen and img_tile is None:
tiler = img_tiles.Stamen(stamen)
ax.add_image(tiler, tile)
warnings.warn(
"Stamen is deprecated in Cartopy and in future versions of ACT, "
"please use img_tile to specify the image background. ")
else:
if img_tile is not None:
tiler = getattr(img_tiles, img_tile)(**img_tile_args)
ax.add_image(tiler, tile)
colorbar_map = None
if cmap is not None:
colorbar_map = matplotlib.colormaps.get_cmap(cmap)
sc = ax.scatter(lon, lat, c=data, cmap=colorbar_map, **kwargs)
cbar = plt.colorbar(sc)
cbar.ax.set_ylabel(cbar_label)
if cartopy_feature is not None:
if isinstance(cartopy_feature, str):
cartopy_feature = [cartopy_feature]
cartopy_feature = [ii.upper() for ii in cartopy_feature]
if 'STATES' in cartopy_feature:
ax.add_feature(cfeature.STATES.with_scale('10m'))
if 'LAND' in cartopy_feature:
ax.add_feature(cfeature.LAND)
if 'OCEAN' in cartopy_feature:
ax.add_feature(cfeature.OCEAN)
if 'COASTLINE' in cartopy_feature:
ax.add_feature(cfeature.COASTLINE)
if 'BORDERS' in cartopy_feature:
ax.add_feature(cfeature.BORDERS, linestyle=':')
if 'LAKES' in cartopy_feature:
ax.add_feature(cfeature.LAKES, alpha=0.5)
if 'RIVERS' in cartopy_feature:
ax.add_feature(cfeature.RIVERS)
if text is not None:
for label, location in text.items():
ax.plot(location[0], location[1], marker='*', color='black')
ax.text(location[0], location[1], label, color='black')
if gridlines:
if projection == ccrs.PlateCarree() or projection == ccrs.Mercator:
gl = ax.gridlines(
crs=projection,
draw_labels=True,
linewidth=1,
color='gray',
alpha=0.5,
linestyle='--',
)
gl.top_labels = False
gl.left_labels = True
gl.bottom_labels = True
gl.right_labels = False
gl.xlabel_style = {'size': 6, 'color': 'gray'}
gl.ylabel_style = {'size': 6, 'color': 'gray'}
else:
# Labels are only currently supported for PlateCarree and Mercator
gl = ax.gridlines(
draw_labels=False,
linewidth=1,
color='gray',
alpha=0.5,
linestyle='--',
)
return ax
<|code_end|>
|
act/plotting/geodisplay.py
<|code_start|>"""
Stores the class for GeographicPlotDisplay.
"""
import warnings
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from .plot import Display
try:
import cartopy.crs as ccrs
import cartopy.feature as cfeature
from cartopy.io import img_tiles
CARTOPY_AVAILABLE = True
except ImportError:
CARTOPY_AVAILABLE = False
class GeographicPlotDisplay(Display):
"""
    A class for making geographic tracer plots of aircraft, ship or other
    moving platform data.
    This is inherited from the :func:`act.plotting.Display`
    class and therefore has the same attributes as that class.
See :func:`act.plotting.Display`
for more information. There are no additional attributes or parameters
to this class.
In order to create geographic plots, ACT needs the Cartopy package to be
installed on your system. More information about
    Cartopy can be found here: https://scitools.org.uk/cartopy/docs/latest/ .
"""
def __init__(self, ds, ds_name=None, **kwargs):
if not CARTOPY_AVAILABLE:
raise ImportError(
'Cartopy needs to be installed on your ' 'system to make geographic display plots.'
)
super().__init__(ds, ds_name, **kwargs)
if self.fig is None:
self.fig = plt.figure(**kwargs)
def geoplot(
self,
data_field=None,
lat_field='lat',
lon_field='lon',
dsname=None,
cbar_label=None,
title=None,
projection=None,
plot_buffer=0.08,
img_tile=None,
img_tile_args={},
tile=8,
cartopy_feature=None,
cmap='rainbow',
text=None,
gridlines=True,
**kwargs,
):
"""
Creates a latitude and longitude plot of a time series data set with
data values indicated by color and described with a colorbar.
Latitude values must be in degree north (-90 to 90) and
longitude must be in degree east (-180 to 180).
Parameters
----------
data_field : str
Name of data field in the dataset to plot.
lat_field : str
Name of latitude field in the dataset to use.
lon_field : str
Name of longitude field in the dataset to use.
dsname : str or None
The name of the datastream to plot. Set to None to make ACT
attempt to automatically determine this.
cbar_label : str
Label to use with colorbar. If set to None will attempt
to create label from long_name and units.
title : str
Plot title.
projection : cartopy.crs object
            Projection to use on plot. See
https://scitools.org.uk/cartopy/docs/latest/reference/projections.html?highlight=projections
plot_buffer : float
Buffer to add around data on plot in lat and lon dimension.
img_tile : str
Image to use for the plot background. Set to None to not use
background image. For all image background types, see:
https://scitools.org.uk/cartopy/docs/v0.16/cartopy/io/img_tiles.html
Default is None.
img_tile_args : dict
Keyword arguments for the chosen img_tile. These arguments can be
found for the corresponding img_tile here:
https://scitools.org.uk/cartopy/docs/v0.16/cartopy/io/img_tiles.html
Default is an empty dictionary.
tile : int
Tile zoom to use with background image. Higher number indicates
more resolution. A value of 8 is typical for a normal sonde plot.
cartopy_feature : list of str or str
Cartopy feature to add to plot.
cmap : str
Color map to use for colorbar.
text : dictionary
Dictionary of {text:[lon,lat]} to add to plot. Can have more
than one set of text to add.
gridlines : boolean
Use latitude and longitude gridlines.
**kwargs : keyword arguments
Any other keyword arguments that will be passed
into :func:`matplotlib.pyplot.scatter` when the figure
is made. See the matplotlib documentation for further details
on what keyword arguments are available.
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the GeographicPlotDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
if data_field is None:
raise ValueError('You must enter the name of the data ' 'to be plotted.')
if projection is None:
if CARTOPY_AVAILABLE:
projection = ccrs.PlateCarree()
# Extract data from the dataset
try:
lat = self._ds[dsname][lat_field].values
except KeyError:
raise ValueError(
(
'You will need to provide the name of the '
"field if not '{}' to use for latitude "
'data.'
).format(lat_field)
)
try:
lon = self._ds[dsname][lon_field].values
except KeyError:
raise ValueError(
(
'You will need to provide the name of the '
"field if not '{}' to use for longitude "
'data.'
).format(lon_field)
)
# Set up metadata information for display on plot
if cbar_label is None:
try:
cbar_label = (
self._ds[dsname][data_field].attrs['long_name']
+ ' ('
+ self._ds[dsname][data_field].attrs['units']
+ ')'
)
except KeyError:
cbar_label = data_field
lat_limits = [np.nanmin(lat), np.nanmax(lat)]
lon_limits = [np.nanmin(lon), np.nanmax(lon)]
box_size = np.max([np.abs(np.diff(lat_limits)), np.abs(np.diff(lon_limits))])
bx_buf = box_size * plot_buffer
lat_center = np.sum(lat_limits) / 2.0
lon_center = np.sum(lon_limits) / 2.0
lat_limits = [
lat_center - box_size / 2.0 - bx_buf,
lat_center + box_size / 2.0 + bx_buf,
]
lon_limits = [
lon_center - box_size / 2.0 - bx_buf,
lon_center + box_size / 2.0 + bx_buf,
]
data = self._ds[dsname][data_field].values
# Create base plot projection
ax = plt.axes(projection=projection)
plt.subplots_adjust(left=0.01, right=0.99, bottom=0.05, top=0.93)
ax.set_extent([lon_limits[0], lon_limits[1], lat_limits[0], lat_limits[1]], crs=projection)
if title is None:
try:
dim = list(self._ds[dsname][data_field].dims)
ts = pd.to_datetime(str(self._ds[dsname][dim[0]].values[0]))
date = ts.strftime('%Y-%m-%d')
time_str = ts.strftime('%H:%M:%S')
plt.title(' '.join([dsname, 'at', date, time_str]))
except NameError:
plt.title(dsname)
else:
plt.title(title)
if img_tile is not None:
tiler = getattr(img_tiles, img_tile)(**img_tile_args)
ax.add_image(tiler, tile)
colorbar_map = None
if cmap is not None:
colorbar_map = matplotlib.colormaps.get_cmap(cmap)
sc = ax.scatter(lon, lat, c=data, cmap=colorbar_map, **kwargs)
cbar = plt.colorbar(sc)
cbar.ax.set_ylabel(cbar_label)
if cartopy_feature is not None:
if isinstance(cartopy_feature, str):
cartopy_feature = [cartopy_feature]
cartopy_feature = [ii.upper() for ii in cartopy_feature]
if 'STATES' in cartopy_feature:
ax.add_feature(cfeature.STATES.with_scale('10m'))
if 'LAND' in cartopy_feature:
ax.add_feature(cfeature.LAND)
if 'OCEAN' in cartopy_feature:
ax.add_feature(cfeature.OCEAN)
if 'COASTLINE' in cartopy_feature:
ax.add_feature(cfeature.COASTLINE)
if 'BORDERS' in cartopy_feature:
ax.add_feature(cfeature.BORDERS, linestyle=':')
if 'LAKES' in cartopy_feature:
ax.add_feature(cfeature.LAKES, alpha=0.5)
if 'RIVERS' in cartopy_feature:
ax.add_feature(cfeature.RIVERS)
if text is not None:
for label, location in text.items():
ax.plot(location[0], location[1], marker='*', color='black')
ax.text(location[0], location[1], label, color='black')
if gridlines:
if projection == ccrs.PlateCarree() or projection == ccrs.Mercator:
gl = ax.gridlines(
crs=projection,
draw_labels=True,
linewidth=1,
color='gray',
alpha=0.5,
linestyle='--',
)
gl.top_labels = False
gl.left_labels = True
gl.bottom_labels = True
gl.right_labels = False
gl.xlabel_style = {'size': 6, 'color': 'gray'}
gl.ylabel_style = {'size': 6, 'color': 'gray'}
else:
# Labels are only currently supported for PlateCarree and Mercator
gl = ax.gridlines(
draw_labels=False,
linewidth=1,
color='gray',
alpha=0.5,
linestyle='--',
)
return ax
<|code_end|>
|
Streaking with missing data in 2D plots
### Description
There is a new issue with pcolormesh() causing streaking when there are larger data gaps between data periods. If you look closely at the plot there is a part with some structure at the start and end of the plotted periods; that is the actual data. The extended streaking is an artifact of pcolormesh(). The issue is that we use add_nan=True to add one NaN value in the middle of the missing data period, and pcolormesh() extends the plot half of that window, out to the inserted NaN. All attempts at changing this behavior with the shading keyword (set_shading in .plot() from timeseriesdisplay()) did not fix this issue.

### What I Did
If I modify data_utils.add_in_nan() to add two values during the missing data period, one right after the last value and one just before the next value, the issue is resolved.

I have some code ready to go. For 1D data it will still add one NaN value in the middle of the gap, same as before, but for 2D data it will add a value right after the last sample and right before the next sample. With two values added the streaking is removed. With v2.0 coming this will need to be integrated correctly; a sketch of the idea is below.
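A minimal sketch of the two-NaN approach, mirroring the gap detection already used in add_in_nan() but placing the inserted samples at the gap edges instead of the midpoint; times may be numeric or datetime64 and data is assumed 2D with time on axis 0. This is an illustration of the idea, not the final patch:

```python
import numpy as np
from scipy import stats


def add_in_nan_pair(time, data, gap_factor=2.0):
    """Insert two NaN samples per large time gap instead of one.

    One sample lands one typical time step after the last good sample and one
    lands one step before the next good sample, so pcolormesh() only blanks
    the gap instead of smearing data halfway across it.
    """
    time = np.asarray(time)
    data = np.asarray(data, dtype=float)
    diff = np.diff(time)
    # Same mode-of-timestep logic used by the existing add_in_nan().
    try:
        step = stats.mode(diff, keepdims=True).mode[0]
    except TypeError:
        step = stats.mode(diff).mode[0]
    offset = 0
    for i in np.where(diff > gap_factor * step)[0]:
        j = i + offset
        # One new time right after the last sample, one right before the next.
        time = np.insert(time, j + 1, [time[j] + step, time[j + 1] - step])
        data = np.insert(data, j + 1, np.nan, axis=0)
        data = np.insert(data, j + 2, np.nan, axis=0)
        offset += 2
    return time, data
```

Compared with the single mid-gap NaN, the blanked region now stops one time step past the real data on either side of the gap.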
|
act/utils/data_utils.py
<|code_start|>"""
Module containing utilities for the data.
"""
import importlib
import warnings
import json
import metpy
import numpy as np
import pint
import scipy.stats as stats
import xarray as xr
from pathlib import Path
import re
import requests
spec = importlib.util.find_spec('pyart')
if spec is not None:
PYART_AVAILABLE = True
else:
PYART_AVAILABLE = False
@xr.register_dataset_accessor('utils')
class ChangeUnits:
"""
Class for updating units in the dataset. Data values and units attribute
    are updated in place. Coordinate variables can not be updated in place. Must
    use the new returned dataset when updating coordinate variables.
"""
def __init__(self, ds):
self._ds = ds
def change_units(
self, variables=None, desired_unit=None, skip_variables=None, skip_standard=True
):
"""
Parameters
----------
variables : None, str or list of str
Variable names to attempt to change units.
desired_unit : str
Desired udunits unit string.
skip_variables : None, str or list of str
Variable names to skip. Works well when not providing a variables
keyword.
skip_standard : boolean
Flag indicating the QC variables that will not need changing are
skipped. Makes the processing faster when processing all variables
in dataset.
Returns
-------
dataset : xarray.dataset
A new dataset if the coordinate variables are updated. Required to
            use the returned dataset if coordinate variables are updated,
otherwise the dataset is updated in place.
"""
if variables is not None and isinstance(variables, str):
variables = [variables]
if skip_variables is not None and isinstance(skip_variables, str):
skip_variables = [skip_variables]
if desired_unit is None:
raise ValueError("Need to provide 'desired_unit' keyword for .change_units() method")
if variables is None:
variables = list(self._ds.data_vars)
if skip_variables is not None:
variables = list(set(variables) - set(skip_variables))
for var_name in variables:
try:
if self._ds[var_name].attrs['standard_name'] == 'quality_flag':
continue
except KeyError:
pass
try:
data = convert_units(
self._ds[var_name].values,
self._ds[var_name].attrs['units'],
desired_unit,
)
try:
self._ds[var_name].values = data
self._ds[var_name].attrs['units'] = desired_unit
except ValueError:
attrs = self._ds[var_name].attrs
self._ds = self._ds.assign_coords({var_name: data})
attrs['units'] = desired_unit
self._ds[var_name].attrs = attrs
except (
KeyError,
pint.errors.DimensionalityError,
pint.errors.UndefinedUnitError,
np.core._exceptions.UFuncTypeError,
):
continue
return self._ds
# @xr.register_dataset_accessor('utils')
class DatastreamParserARM(object):
'''
Class to parse ARM datastream names or filenames into its components.
Will return None for each attribute if not extracted from the filename.
Attributes
----------
site : str or None
The site code extracted from the filename.
datastream_class : str
The datastream class extracted from the filename.
facility : str or None
The datastream facility code extracted from the filename.
level : str or None
The datastream level code extracted from the filename.
datastream : str or None
        The datastream extracted from the filename.
date : str or None
The date extracted from the filename.
time : str or None
The time extracted from the filename.
ext : str or None
The file extension extracted from the filename.
Example
-------
>>> from act.utils.data_utils import DatastreamParserARM
>>> file = 'sgpmetE13.b1.20190501.024254.nc'
>>> fn_obj = DatastreamParserARM(file)
>>> fn_obj.site
'sgp'
>>> fn_obj.datastream_class
'met'
'''
def __init__(self, ds=''):
'''
Constructor that initializes datastream data member and runs
parse_datastream class method. Also converts datastream name to
lower case before parsing.
ds : str
The datastream or filename to parse
'''
if isinstance(ds, str):
self.__datastream = Path(ds).name
else:
raise ValueError('Datastream or filename name must be a string')
try:
self.__parse_datastream()
except ValueError:
self.__site = None
self.__class = None
self.__facility = None
self.__datastream = None
self.__level = None
self.__date = None
self.__time = None
self.__ext = None
def __parse_datastream(self):
'''
Private method to parse datastream name into its various components
        (site, class, facility, and data level). Is called automatically by
constructor when object of class is instantiated and when the
set_datastream method is called to reset the object.
'''
# Import the built-in match function from regular expression library
# self.__datastream = self.__datastream
tempstring = self.__datastream.split('.')
# Check to see if ARM-standard filename was passed
self.__ext = None
self.__time = None
self.__date = None
self.__level = None
self.__site = None
self.__class = None
self.__facility = None
if len(tempstring) >= 5:
self.__ext = tempstring[4]
if len(tempstring) >= 4:
self.__time = tempstring[3]
if len(tempstring) >= 3:
self.__date = tempstring[2]
if len(tempstring) >= 2:
m = re.match('[abcs0][0123456789]', tempstring[1])
if m is not None:
self.__level = m.group()
match = False
m = re.search(r'(^[a-z]{3})(\w+)([A-Z]{1}\d{1,2})$', tempstring[0])
if m is not None:
self.__site = m.group(1)
self.__class = m.group(2)
self.__facility = m.group(3)
match = True
if not match:
m = re.search(r'(^[a-z]{3})(\w+)$', tempstring[0])
if m is not None:
self.__site = m.group(1)
self.__class = m.group(2)
match = True
if not match and len(tempstring[0]) == 3:
self.__site = tempstring[0]
match = True
if not match:
raise ValueError(self.__datastream)
def set_datastream(self, ds):
'''
Method used to set or reset object by passing a new datastream name.
'''
self.__init__(ds)
@property
def datastream(self):
'''
Property returning current datastream name stored in object in
        standard lower case. Will return the datastream with no level if
unavailable.
'''
try:
return ''.join((self.__site, self.__class, self.__facility, '.',
self.__level))
except TypeError:
return None
@property
def site(self):
'''
Property returning current site name stored in object in standard
lower case.
'''
return self.__site
@property
def datastream_class(self):
'''
Property returning current datastream class name stored in object in
standard lower case. Could not use class as attribute name since it
is a reserved word in Python
'''
return self.__class
@property
def facility(self):
'''
Property returning current facility name stored in object in
standard upper case.
'''
try:
return self.__facility.upper()
except AttributeError:
return self.__facility
@property
def level(self):
'''
Property returning current data level stored in object in standard
lower case.
'''
return self.__level
@property
def datastream_standard(self):
'''
Property returning datastream name in ARM-standard format with
facility in caps. Will return the datastream name with no level if
unavailable.
'''
try:
return ''.join((self.site, self.datastream_class, self.facility,
'.', self.level))
except TypeError:
return None
@property
def date(self):
'''
Property returning date from filename.
'''
return self.__date
@property
def time(self):
'''
Property returning time from filename.
'''
return self.__time
@property
def ext(self):
'''
Property returning file extension from filename.
'''
return self.__ext
def assign_coordinates(ds, coord_list):
"""
This procedure will create a new ACT dataset whose coordinates are
designated to be the variables in a given list. This helps make data
slicing via xarray and visualization easier.
Parameters
----------
ds : ACT Dataset
The ACT Dataset to modify the coordinates of.
coord_list : dict
The list of variables to assign as coordinates, given as a dictionary
whose keys are the variable name and values are the dimension name.
Returns
-------
new_ds : ACT Dataset
The new ACT Dataset with the coordinates assigned to be the given
variables.
"""
# Check to make sure that user assigned valid entries for coordinates
for coord in coord_list.keys():
if coord not in ds.variables.keys():
raise KeyError(coord + ' is not a variable in the Dataset.')
if ds.dims[coord_list[coord]] != len(ds.variables[coord]):
raise IndexError(
coord + ' must have the same ' + 'value as length of ' + coord_list[coord]
)
new_ds_dict = {}
for variable in ds.variables.keys():
my_coord_dict = {}
dataarray = ds[variable]
if len(dataarray.dims) > 0:
for coord in coord_list.keys():
if coord_list[coord] in dataarray.dims:
my_coord_dict[coord_list[coord]] = ds[coord]
if variable not in my_coord_dict.keys() and variable not in ds.dims:
the_dataarray = xr.DataArray(dataarray.data, coords=my_coord_dict, dims=dataarray.dims)
new_ds_dict[variable] = the_dataarray
new_ds = xr.Dataset(new_ds_dict, coords=my_coord_dict)
return new_ds
def add_in_nan(time, data):
"""
This procedure adds in NaNs when there is a larger than expected time step.
    This is useful for timeseries where there is a gap in data and a NaN
    value is needed to stop plotting from connecting data over the large data gap.
Parameters
----------
time : 1D array of numpy datetime64 or Xarray DataArray of datetime64
Times in the timeseries.
data : 1D or 2D numpy array or Xarray DataArray
Array containing the data. The 0 axis corresponds to time.
Returns
-------
time : numpy array or Xarray DataArray
The array containing the new times including a NaN filled
        sample or slice if multi-dimensional.
The intervals are determined by the mode of the timestep in *time*.
data : numpy array or Xarray DataArray
        The array containing the NaN-inserted data.
"""
time_is_DataArray = False
data_is_DataArray = False
if isinstance(time, xr.core.dataarray.DataArray):
time_is_DataArray = True
time_attributes = time.attrs
time_dims = time.dims
if isinstance(data, xr.core.dataarray.DataArray):
data_is_DataArray = True
data_attributes = data.attrs
data_dims = data.dims
# Return if time dimension is only size one since we can't do differences.
if time.size > 2:
data = np.asarray(data)
time = np.asarray(time)
# Not sure if we need to set to second data type to make it work better.
# Leaving code in here in case we need to update.
# diff = np.diff(time.astype('datetime64[s]'), 1)
diff = np.diff(time, 1)
# Wrapping in a try to catch error while switching between numpy 1.10 to 1.11
try:
mode = stats.mode(diff, keepdims=True).mode[0]
except TypeError:
mode = stats.mode(diff).mode[0]
index = np.where(diff > (2.0 * mode))
offset = 0
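        # Insert a single NaN sample at the midpoint of each gap larger than
        # twice the typical time step so plots do not connect across the gap.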
for i in index[0]:
corr_i = i + offset
time_added = time[corr_i] + (time[corr_i + 1] - time[corr_i]) / 2.0
time = np.insert(time, corr_i + 1, time_added)
data = np.insert(data, corr_i + 1, np.nan, axis=0)
offset += 1
if time_is_DataArray:
time = xr.DataArray(time, attrs=time_attributes, dims=time_dims)
if data_is_DataArray:
data = xr.DataArray(data, attrs=data_attributes, dims=data_dims)
return time, data
def get_missing_value(
ds,
variable,
default=-9999,
add_if_missing_in_ds=False,
use_FillValue=False,
nodefault=False,
):
"""
Function to get missing value from missing_value or _FillValue attribute.
Works well with catching errors and allows for a default value when a
missing value is not listed in the dataset. You may get strange results
    because xarray will automatically convert all missing_value or
_FillValue to NaN and then remove the missing_value and
_FillValue variable attribute when reading data with default settings.
Parameters
----------
ds : xarray.Dataset
Xarray dataset containing data variable.
variable : str
Variable name to use for getting missing value.
default : int or float
Default value to use if missing value attribute is not in dataset.
add_if_missing_in_ds : bool
Boolean to add to the dataset if does not exist. Default is False.
use_FillValue : bool
Boolean to use _FillValue instead of missing_value. If missing_value
does exist and _FillValue does not will add _FillValue
set to missing_value value.
nodefault : bool
        Option to use this to check if the variable has a missing value set and
        you do not want the default returned. If the missing value is found
        it will be returned, else None will be returned.
Returns
-------
missing : scalar int or float (or None)
Value used to indicate missing value matching type of data or None if
nodefault keyword set to True.
Examples
--------
.. code-block:: python
from act.utils import get_missing_value
missing = get_missing_value(dq_ds, "temp_mean")
print(missing)
-9999.0
"""
in_ds = False
if use_FillValue:
missing_atts = ['_FillValue', 'missing_value']
else:
missing_atts = ['missing_value', '_FillValue']
for att in missing_atts:
try:
missing = ds[variable].attrs[att]
in_ds = True
break
except (AttributeError, KeyError):
missing = default
    # Check if a default value should not be returned and a value
    # was not found.
if nodefault is True and in_ds is False:
missing = None
return missing
# Check data type and try to match missing_value to the data type of data
try:
missing = ds[variable].data.dtype.type(missing)
except KeyError:
pass
except AttributeError:
print(
('--- AttributeError: Issue trying to get data type ' + 'from "{}" data ---').format(
variable
)
)
# If requested add missing value to the dataset
if add_if_missing_in_ds and not in_ds:
try:
ds[variable].attrs[missing_atts[0]] = missing
except KeyError:
print(
('--- KeyError: Issue trying to add "{}" ' + 'attribute to "{}" ---').format(
missing_atts[0], variable
)
)
return missing
def convert_units(data, in_units, out_units):
"""
Wrapper function around library to convert data using unit strings.
Currently using pint units library. Will attempt to preserve numpy
data type, but will upconvert to numpy float64 if need to change
data type for converted values.
Parameters
----------
data : list, tuple or numpy array
Data array to be modified.
in_units : str
Units scalar string of input data array.
out_units : str
Units scalar string of desired output data array.
Returns
-------
data : numpy array
Data array converted into new units.
Examples
--------
> data = np.array([1,2,3,4,5,6])
> data = convert_units(data, 'cm', 'm')
> data
array([0.01, 0.02, 0.03, 0.04, 0.05, 0.06])
"""
# Fix historical and current incorrect usage of units.
convert_dict = {
'C': 'degC',
'F': 'degF',
'%': 'percent', # Pint does not like this symbol with .to('%')
'1': 'unitless', # Pint does not like a number
}
if in_units in convert_dict:
in_units = convert_dict[in_units]
if out_units in convert_dict:
out_units = convert_dict[out_units]
if in_units == out_units:
return data
# Instantiate the registry
ureg = pint.UnitRegistry(autoconvert_offset_to_baseunit=True)
# Add missing units and conversions
ureg.define('fraction = []')
ureg.define('unitless = []')
if not isinstance(data, np.ndarray):
data = np.array(data)
data_type = data.dtype
data_type_kind = data.dtype.kind
# Do the conversion magic
data = (data * ureg(in_units)).to(out_units)
data = data.magnitude
# The data type may be changed by pint. This is a side effect
# of pint changing the datatype to float. Check if the converted values
    # need float precision. If so leave, if not change back to original
    # precision after checking that precision is not lost with the original
    # data type.
if (
data_type_kind == 'i'
and np.nanmin(data) >= np.iinfo(data_type).min
and np.nanmax(data) <= np.iinfo(data_type).max
and np.all(np.mod(data, 1) == 0)
):
data = data.astype(data_type)
return data
def ts_weighted_average(ts_dict):
"""
Program to take in multiple difference time-series and average them
using the weights provided. This assumes that the variables passed in
all have the same units. Please see example gallery for an example.
NOTE: All weights should add up to 1
Parameters
----------
ts_dict : dict
Dictionary containing datastream, variable, weight, and datasets
.. code-block:: python
t_dict = {
"sgpvdisC1.b1": {
"variable": "rain_rate",
"weight": 0.05,
"ds": ds,
},
"sgpmetE13.b1": {
"variable": [
"tbrg_precip_total",
"org_precip_rate_mean",
"pwd_precip_rate_mean_1min",
],
"weight": [0.25, 0.05, 0.0125],
},
}
Returns
-------
data : numpy array
Variable of time-series averaged data
"""
# Run through each datastream/variable and get data
da_array = []
data = 0.0
for d in ts_dict:
for i, v in enumerate(ts_dict[d]['variable']):
new_name = '_'.join([d, v])
# Since many variables may have same name, rename with datastream
da = ts_dict[d]['ds'][v].rename(new_name)
# Apply Weights to Data
da.values = da.values * ts_dict[d]['weight'][i]
da_array.append(da)
da = xr.merge(da_array)
# Stack all the data into a 2D time series
data = None
for i, d in enumerate(da):
if i == 0:
data = da[d].values
else:
data = np.vstack((data, da[d].values))
# Sum data across each time sample
data = np.nansum(data, 0)
# Add data to data array and return
dims = ts_dict[list(ts_dict.keys())[0]]['ds'].dims
da_xr = xr.DataArray(
data,
dims=dims,
coords={'time': ts_dict[list(ts_dict.keys())[0]]['ds']['time']},
)
da_xr.attrs['long_name'] = 'Weighted average of ' + ', '.join(list(ts_dict.keys()))
return da_xr
def accumulate_precip(ds, variable, time_delta=None):
"""
Program to accumulate rain rates from an act xarray dataset and insert
variable back into an act xarray dataset with "_accumulated" appended to
the variable name. Please verify that your units are accurately described
in the data.
Parameters
----------
ds : xarray.DataSet
ACT Xarray dataset.
variable : string
Variable name.
time_delta : float
        Time delta to calculate precip accumulations over.
Useful if full time series is not passed in.
Returns
-------
ds : xarray.DataSet
ACT Xarray dataset with variable_accumulated.
"""
    # Get data, time, and metadata
data = ds[variable]
time = ds.coords['time']
units = ds[variable].attrs['units']
    # Calculate mode of the time samples (i.e. 1 min vs 1 sec)
if time_delta is None:
diff = np.diff(time.values, 1) / np.timedelta64(1, 's')
try:
t_delta = stats.mode(diff, keepdims=False).mode
except TypeError:
t_delta = stats.mode(diff).mode
else:
t_delta = time_delta
# Calculate the accumulation based on the units
t_factor = t_delta / 60.0
if units == 'mm/hr':
data = data * (t_factor / 60.0)
accum = np.nancumsum(data.values)
# Add accumulated variable back to the dataset
long_name = 'Accumulated precipitation'
attrs = {'long_name': long_name, 'units': 'mm'}
ds['_'.join([variable, 'accumulated'])] = xr.DataArray(
accum, coords=ds[variable].coords, attrs=attrs
)
return ds
def create_pyart_obj(
ds,
variables=None,
sweep=None,
azimuth=None,
elevation=None,
range_var=None,
sweep_start=None,
sweep_end=None,
lat=None,
lon=None,
alt=None,
sweep_mode='ppi',
sweep_az_thresh=10.0,
sweep_el_thresh=0.5,
):
"""
Produces a Py-ART radar object based on data in the ACT Xarray dataset.
Parameters
----------
ds : xarray.DataSet
ACT Xarray dataset.
variables : list
List of variables to add to the radar object, will default to all
variables.
sweep : string
Name of variable that has sweep information. If none, will try and
calculate from the azimuth and elevation.
azimuth : string
Name of azimuth variable. Will try and find one if none given.
elevation : string
Name of elevation variable. Will try and find one if none given.
range_var : string
Name of the range variable. Will try and find one if none given.
sweep_start : string
Name of variable with sweep start indices.
sweep_end : string
Name of variable with sweep end indices.
lat : string
Name of latitude variable. Will try and find one if none given.
lon : string
Name of longitude variable. Will try and find one if none given.
alt : string
Name of altitude variable. Will try and find one if none given.
sweep_mode : string
Type of scan. Defaults to PPI.
sweep_az_thresh : float
If calculating sweep numbers, the maximum change in azimuth before new
sweep.
sweep_el_thresh : float
If calculating sweep numbers, the maximum change in elevation before
new sweep.
Returns
-------
radar : radar.Radar
Py-ART Radar Object.
"""
if not PYART_AVAILABLE:
raise ImportError(
'Py-ART needs to be installed on your system to convert to ' 'Py-ART Object.'
)
else:
import pyart
# Get list of variables if none provided
if variables is None:
variables = list(ds.keys())
    # Determine the sweeps if not already in a variable
if sweep is None:
swp = np.zeros(ds.sizes['time'])
for key in ds.variables.keys():
if len(ds.variables[key].shape) == 2:
total_rays = ds.variables[key].shape[0]
break
nsweeps = int(total_rays / ds.variables['time'].shape[0])
else:
swp = ds[sweep].values
nsweeps = ds[sweep].values
# Get coordinate variables
if lat is None:
lat = [s for s in variables if 'latitude' in s]
if len(lat) == 0:
lat = [s for s in variables if 'lat' in s]
if len(lat) == 0:
raise ValueError(
'Latitude variable not set and could not be ' 'discerned from the data.'
)
else:
lat = lat[0]
if lon is None:
lon = [s for s in variables if 'longitude' in s]
if len(lon) == 0:
lon = [s for s in variables if 'lon' in s]
if len(lon) == 0:
raise ValueError(
'Longitude variable not set and could not be ' 'discerned from the data.'
)
else:
lon = lon[0]
if alt is None:
alt = [s for s in variables if 'altitude' in s]
if len(alt) == 0:
alt = [s for s in variables if 'alt' in s]
if len(alt) == 0:
raise ValueError(
'Altitude variable not set and could not be ' 'discerned from the data.'
)
else:
alt = alt[0]
# Get additional variable names if none provided
if azimuth is None:
azimuth = [s for s in sorted(variables) if 'azimuth' in s][0]
if len(azimuth) == 0:
raise ValueError(
'Azimuth variable not set and could not be ' 'discerned from the data.'
)
if elevation is None:
elevation = [s for s in sorted(variables) if 'elevation' in s][0]
if len(elevation) == 0:
raise ValueError(
'Elevation variable not set and could not be ' 'discerned from the data.'
)
if range_var is None:
range_var = [s for s in sorted(variables) if 'range' in s][0]
if len(range_var) == 0:
raise ValueError('Range variable not set and could not be ' 'discerned from the data.')
# Calculate the sweep indices if not passed in
if sweep_start is None and sweep_end is None:
az_diff = np.abs(np.diff(ds[azimuth].values))
az_idx = az_diff > sweep_az_thresh
el_diff = np.abs(np.diff(ds[elevation].values))
el_idx = el_diff > sweep_el_thresh
# Create index list
az_index = list(np.where(az_idx)[0] + 1)
el_index = list(np.where(el_idx)[0] + 1)
index = sorted(az_index + el_index)
index.insert(0, 0)
index += [ds.sizes['time']]
sweep_start_index = []
sweep_end_index = []
for i in range(len(index) - 1):
sweep_start_index.append(index[i])
sweep_end_index.append(index[i + 1] - 1)
swp[index[i] : index[i + 1]] = i
else:
sweep_start_index = ds[sweep_start].values
sweep_end_index = ds[sweep_end].values
if sweep is None:
for i in range(len(sweep_start_index)):
swp[sweep_start_index[i] : sweep_end_index[i]] = i
radar = pyart.testing.make_empty_ppi_radar(ds.sizes[range_var], ds.sizes['time'], nsweeps)
radar.time['data'] = np.array(ds['time'].values)
# Add lat, lon, and alt
radar.latitude['data'] = np.array(ds[lat].values)
radar.longitude['data'] = np.array(ds[lon].values)
radar.altitude['data'] = np.array(ds[alt].values)
# Add sweep information
radar.sweep_number['data'] = swp
radar.sweep_start_ray_index['data'] = sweep_start_index
radar.sweep_end_ray_index['data'] = sweep_end_index
radar.sweep_mode['data'] = np.array(sweep_mode)
radar.scan_type = sweep_mode
# Add elevation, azimuth, etc...
radar.azimuth['data'] = np.array(ds[azimuth])
radar.elevation['data'] = np.array(ds[elevation])
radar.fixed_angle['data'] = np.array(ds[elevation].values[0])
radar.range['data'] = np.array(ds[range_var].values)
# Calculate radar points in lat/lon
radar.init_gate_altitude()
radar.init_gate_longitude_latitude()
# Add the fields to the radar object
fields = {}
for v in variables:
ref_dict = pyart.config.get_metadata(v)
ref_dict['data'] = np.array(ds[v].values)
fields[v] = ref_dict
radar.fields = fields
return radar
def convert_to_potential_temp(
ds=None,
temp_var_name=None,
press_var_name=None,
temperature=None,
pressure=None,
temp_var_units=None,
press_var_units=None,
):
"""
Converts temperature to potential temperature.
Parameters
----------
ds : xarray.DataSet
ACT Xarray dataset
temp_var_name : str
Temperature variable name in the ACT Xarray dataset containing
temperature data to convert.
press_var_name : str
Pressure variable name in the ACT Xarray dataset containing the
pressure data to use in conversion. If not set or set to None will
use values from pressure keyword.
pressure : int, float, numpy array
Optional pressure values to use instead of using values from xarray
dataset. If set must also set press_var_units keyword.
temp_var_units : string
Pint recognized units string for temperature data. If set to None will
use the units attribute under temperature variable in ds.
press_var_units : string
Pint recognized units string for pressure data. If set to None will
use the units attribute under pressure variable in the dataset. If using
the pressure keyword this must be set.
Returns
-------
potential_temperature : None, int, float, numpy array
The converted temperature to potential temperature or None if something
goes wrong.
References
----------
May, R. M., Arms, S. C., Marsh, P., Bruning, E., Leeman, J. R., Goebbert,
K., Thielen, J. E., and Bruick, Z., 2021: MetPy: A Python Package for
Meteorological Data. Unidata, https://github.com/Unidata/MetPy,
doi:10.5065/D6WW7G29.
"""
potential_temp = None
if temp_var_units is None and temp_var_name is not None:
temp_var_units = ds[temp_var_name].attrs['units']
if press_var_units is None and press_var_name is not None:
press_var_units = ds[press_var_name].attrs['units']
if press_var_units is None:
raise ValueError(
"Need to provide 'press_var_units' keyword " "when using 'pressure' keyword"
)
if temp_var_units is None:
raise ValueError(
"Need to provide 'temp_var_units' keyword " "when using 'temperature' keyword"
)
if temperature is not None:
temperature = metpy.units.units.Quantity(temperature, temp_var_units)
else:
temperature = metpy.units.units.Quantity(ds[temp_var_name].values, temp_var_units)
if pressure is not None:
pressure = metpy.units.units.Quantity(pressure, press_var_units)
else:
pressure = metpy.units.units.Quantity(ds[press_var_name].values, press_var_units)
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=RuntimeWarning)
potential_temp = metpy.calc.potential_temperature(pressure, temperature)
potential_temp = potential_temp.to(temp_var_units).magnitude
return potential_temp
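# Illustrative usage sketch for convert_to_potential_temp (not part of the
# original module; the variable names are assumptions):
#
#   theta = convert_to_potential_temp(
#       ds=ds, temp_var_name='temp_mean', press_var_name='atmos_pressure')
#   # Or with plain values instead of a dataset:
#   theta = convert_to_potential_temp(
#       temperature=25.0, temp_var_units='degC',
#       pressure=101.325, press_var_units='kPa')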
def height_adjusted_temperature(
ds=None,
temp_var_name=None,
height_difference=0,
height_units='m',
press_var_name=None,
temperature=None,
temp_var_units=None,
pressure=101.325,
press_var_units='kPa',
):
"""
Converts temperature for change in height.
Parameters
----------
ds : xarray.DataSet, None
Optional Xarray dataset for retrieving pressure and temperature values.
Not needed if using temperature keyword.
temp_var_name : str, None
Optional temperature variable name in the Xarray dataset containing the
temperature data to use in conversion. If not set or set to None will
use values from temperature keyword.
height_difference : int, float
Required difference in height to adjust pressure values. Positive
values to increase height negative values to decrease height.
height_units : str
Units of height value.
press_var_name : str, None
Optional pressure variable name in the Xarray dataset containing the
pressure data to use in conversion. If not set or set to None will
use values from pressure keyword.
temperature : int, float, numpy array, None
Optional temperature values to use instead of values in the dataset.
temp_var_units : str, None
Pint recognized units string for temperature data. If set to None will
use the units attribute under temperature variable in the dataset.
If using the temperature keyword this must be set.
pressure : int, float, numpy array, None
Optional pressure values to use instead of values in the dataset.
Default value of sea level pressure is set for ease of use.
press_var_units : str, None
Pint recognized units string for pressure data. If set to None will
use the units attribute under pressure variable in the dataset.
If using the pressure keyword this must be set. Default value of
sea level pressure is set for ease of use.
Returns
-------
adjusted_temperature : None, int, float, numpy array
The height adjusted temperature or None if something goes wrong.
References
----------
May, R. M., Arms, S. C., Marsh, P., Bruning, E., Leeman, J. R., Goebbert,
K., Thielen, J. E., and Bruick, Z., 2021: MetPy: A Python Package for
Meteorological Data. Unidata, https://github.com/Unidata/MetPy,
doi:10.5065/D6WW7G29.
"""
adjusted_temperature = None
if temp_var_units is None and temperature is None:
temp_var_units = ds[temp_var_name].attrs['units']
if temp_var_units is None:
raise ValueError(
"Need to provide 'temp_var_units' keyword when " 'providing temperature keyword values.'
)
if temperature is not None:
temperature = metpy.units.units.Quantity(temperature, temp_var_units)
else:
temperature = metpy.units.units.Quantity(ds[temp_var_name].values, temp_var_units)
if press_var_name is not None:
pressure = metpy.units.units.Quantity(ds[press_var_name].values, press_var_units)
else:
pressure = metpy.units.units.Quantity(pressure, press_var_units)
adjusted_pressure = height_adjusted_pressure(
height_difference=height_difference,
height_units=height_units,
pressure=pressure.magnitude,
press_var_units=press_var_units,
)
adjusted_pressure = metpy.units.units.Quantity(adjusted_pressure, press_var_units)
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=RuntimeWarning)
adjusted_temperature = metpy.calc.dry_lapse(adjusted_pressure, temperature, pressure)
adjusted_temperature = adjusted_temperature.to(temp_var_units).magnitude
return adjusted_temperature
def height_adjusted_pressure(
ds=None,
press_var_name=None,
height_difference=0,
height_units='m',
pressure=None,
press_var_units=None,
):
"""
Converts pressure for change in height.
Parameters
----------
ds : xarray.DataSet, None
Optional Xarray dataset for retrieving pressure values. Not needed if
using pressure keyword.
press_var_name : str, None
Optional pressure variable name in the Xarray dataset containing the
pressure data to use in conversion. If not set or set to None will
use values from pressure keyword.
height_difference : int, float
Required difference in height to adjust pressure values. Positive
values to increase height negative values to decrease height.
height_units : str
Units of height value.
pressure : int, float, numpy array, None
Optional pressure values to use instead of values in the dataset.
press_var_units : str, None
Pint recognized units string for pressure data. If set to None will
use the units attribute under pressure variable in the dataset.
If using the pressure keyword this must be set.
Returns
-------
adjusted_pressure : None, int, float, numpy array
The height adjusted pressure or None if something goes wrong.
References
----------
May, R. M., Arms, S. C., Marsh, P., Bruning, E., Leeman, J. R., Goebbert,
K., Thielen, J. E., and Bruick, Z., 2021: MetPy: A Python Package for
Meteorological Data. Unidata, https://github.com/Unidata/MetPy,
doi:10.5065/D6WW7G29.
"""
adjusted_pressure = None
if press_var_units is None and pressure is None:
press_var_units = ds[press_var_name].attrs['units']
if press_var_units is None:
raise ValueError(
"Need to provide 'press_var_units' keyword when " 'providing pressure keyword values.'
)
if pressure is not None:
pressure = metpy.units.units.Quantity(pressure, press_var_units)
else:
pressure = metpy.units.units.Quantity(ds[press_var_name].values, press_var_units)
height_difference = metpy.units.units.Quantity(height_difference, height_units)
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=RuntimeWarning)
adjusted_pressure = metpy.calc.add_height_to_pressure(pressure, height_difference)
adjusted_pressure = adjusted_pressure.to(press_var_units).magnitude
return adjusted_pressure
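# Illustrative usage sketch for the height adjustment helpers (not part of the
# original module; the values are arbitrary):
#
#   # Adjust sea-level pressure and a 25 degC temperature 100 m upward.
#   p_adj = height_adjusted_pressure(
#       height_difference=100, height_units='m',
#       pressure=101.325, press_var_units='kPa')
#   t_adj = height_adjusted_temperature(
#       height_difference=100, height_units='m',
#       temperature=25.0, temp_var_units='degC')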
def arm_site_location_search(site_code='sgp', facility_code=None):
"""
Parameters
----------
site_code : str
ARM site code to retrieve facilities and coordinate information. Example and default
is 'sgp'.
facility_code : str or None
Facility code or codes for the ARM site provided. If None is provided, all facilities are returned.
Example string for multiple facilities is 'A4,I5'.
Returns
-------
coord_dict : dict
A dictionary containing the facility chosen coordinate information or all facilities
if None for facility_code and their respective coordinates.
"""
headers = {
'Content-Type': 'application/json',
}
# Return all facilities if facility_code is None else set the query to include
# facility search
if facility_code is None:
query = "site_code:" + site_code
else:
query = "site_code:" + site_code + " AND facility_code:" + facility_code
# Search aggregation for elastic search
json_data = {
"aggs": {
"distinct_facility_code": {
"terms": {
"field": "facility_code.keyword",
"order": {
"_key": "asc"
},
"size": 7000,
},
"aggs": {
"hits": {
"top_hits": {
"_source": [
"site_type",
"site_code",
"facility_code",
"location",
],
"size": 1
},
},
},
},
},
"size": 0,
"query": {
"query_string": {
"query": query,
},
},
}
# Uses requests to grab metadata from arm.gov.
response = requests.get('https://adc.arm.gov/elastic/metadata/_search', headers=headers, json=json_data)
# Loads the text to a dictionary
response_dict = json.loads(response.text)
# Searches dictionary for the site, facility and coordinate information.
coord_dict = {}
# Loop through each facility.
for i in range(len(response_dict['aggregations']['distinct_facility_code']['buckets'])):
site_info = response_dict['aggregations']['distinct_facility_code']['buckets'][i]['hits']['hits']['hits'][0]['_source']
site = site_info['site_code']
facility = site_info['facility_code']
# Some sites do not contain coordinate information, return None if that is the case.
if site_info['location'] is None:
coords = {'latitude': None,
'longitude': None}
else:
lat, lon = site_info['location'].split(',')
lat = float(lat)
lon = float(lon)
coords = {'latitude': lat,
'longitude': lon}
coord_dict.setdefault(site + ' ' + facility, coords)
return coord_dict
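# Illustrative usage sketch for arm_site_location_search (not part of the
# original module; requires network access to adc.arm.gov):
#
#   coords = arm_site_location_search(site_code='sgp', facility_code='E13')
#   # Returns a dict keyed by 'site facility', e.g.
#   # {'sgp E13': {'latitude': ..., 'longitude': ...}}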
<|code_end|>
|
act/utils/data_utils.py
<|code_start|>"""
Module containing utilities for the data.
"""
import importlib
import warnings
import json
import metpy
import numpy as np
import pint
import scipy.stats as stats
import xarray as xr
from pathlib import Path
import re
import requests
spec = importlib.util.find_spec('pyart')
if spec is not None:
PYART_AVAILABLE = True
else:
PYART_AVAILABLE = False
@xr.register_dataset_accessor('utils')
class ChangeUnits:
"""
Class for updating units in the dataset. Data values and units attribute
are updated in place. Coordinate variables can not be updated in place. Must
    use the returned dataset when updating coordinate variables.
"""
def __init__(self, ds):
self._ds = ds
def change_units(
self, variables=None, desired_unit=None, skip_variables=None, skip_standard=True
):
"""
Parameters
----------
variables : None, str or list of str
Variable names to attempt to change units.
desired_unit : str
Desired udunits unit string.
skip_variables : None, str or list of str
Variable names to skip. Works well when not providing a variables
keyword.
skip_standard : boolean
Flag indicating the QC variables that will not need changing are
skipped. Makes the processing faster when processing all variables
in dataset.
Returns
-------
dataset : xarray.dataset
A new dataset if the coordinate variables are updated. Required to
            use the returned dataset if coordinate variables are updated,
otherwise the dataset is updated in place.
"""
if variables is not None and isinstance(variables, str):
variables = [variables]
if skip_variables is not None and isinstance(skip_variables, str):
skip_variables = [skip_variables]
if desired_unit is None:
raise ValueError("Need to provide 'desired_unit' keyword for .change_units() method")
if variables is None:
variables = list(self._ds.data_vars)
if skip_variables is not None:
variables = list(set(variables) - set(skip_variables))
for var_name in variables:
try:
if self._ds[var_name].attrs['standard_name'] == 'quality_flag':
continue
except KeyError:
pass
try:
data = convert_units(
self._ds[var_name].values,
self._ds[var_name].attrs['units'],
desired_unit,
)
try:
self._ds[var_name].values = data
self._ds[var_name].attrs['units'] = desired_unit
except ValueError:
attrs = self._ds[var_name].attrs
self._ds = self._ds.assign_coords({var_name: data})
attrs['units'] = desired_unit
self._ds[var_name].attrs = attrs
except (
KeyError,
pint.errors.DimensionalityError,
pint.errors.UndefinedUnitError,
np.core._exceptions.UFuncTypeError,
):
continue
return self._ds
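# Illustrative usage sketch for the accessor above (not part of the original
# module; assumes `ds` holds a 'temp_mean' variable with a pint-recognized
# units attribute):
#
#   ds = ds.utils.change_units(variables='temp_mean', desired_unit='K')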
# @xr.register_dataset_accessor('utils')
class DatastreamParserARM(object):
'''
Class to parse ARM datastream names or filenames into its components.
Will return None for each attribute if not extracted from the filename.
Attributes
----------
site : str or None
The site code extracted from the filename.
datastream_class : str
The datastream class extracted from the filename.
facility : str or None
The datastream facility code extracted from the filename.
level : str or None
The datastream level code extracted from the filename.
datastream : str or None
        The datastream extracted from the filename.
date : str or None
The date extracted from the filename.
time : str or None
The time extracted from the filename.
ext : str or None
The file extension extracted from the filename.
Example
-------
>>> from act.utils.data_utils import DatastreamParserARM
>>> file = 'sgpmetE13.b1.20190501.024254.nc'
>>> fn_obj = DatastreamParserARM(file)
>>> fn_obj.site
'sgp'
>>> fn_obj.datastream_class
'met'
'''
def __init__(self, ds=''):
'''
Constructor that initializes datastream data member and runs
parse_datastream class method. Also converts datastream name to
lower case before parsing.
ds : str
The datastream or filename to parse
'''
if isinstance(ds, str):
self.__datastream = Path(ds).name
else:
raise ValueError('Datastream or filename name must be a string')
try:
self.__parse_datastream()
except ValueError:
self.__site = None
self.__class = None
self.__facility = None
self.__datastream = None
self.__level = None
self.__date = None
self.__time = None
self.__ext = None
def __parse_datastream(self):
'''
Private method to parse datastream name into its various components
        (site, class, facility, and data level). It is called automatically by
constructor when object of class is instantiated and when the
set_datastream method is called to reset the object.
'''
# Import the built-in match function from regular expression library
# self.__datastream = self.__datastream
tempstring = self.__datastream.split('.')
# Check to see if ARM-standard filename was passed
self.__ext = None
self.__time = None
self.__date = None
self.__level = None
self.__site = None
self.__class = None
self.__facility = None
if len(tempstring) >= 5:
self.__ext = tempstring[4]
if len(tempstring) >= 4:
self.__time = tempstring[3]
if len(tempstring) >= 3:
self.__date = tempstring[2]
if len(tempstring) >= 2:
m = re.match('[abcs0][0123456789]', tempstring[1])
if m is not None:
self.__level = m.group()
match = False
m = re.search(r'(^[a-z]{3})(\w+)([A-Z]{1}\d{1,2})$', tempstring[0])
if m is not None:
self.__site = m.group(1)
self.__class = m.group(2)
self.__facility = m.group(3)
match = True
if not match:
m = re.search(r'(^[a-z]{3})(\w+)$', tempstring[0])
if m is not None:
self.__site = m.group(1)
self.__class = m.group(2)
match = True
if not match and len(tempstring[0]) == 3:
self.__site = tempstring[0]
match = True
if not match:
raise ValueError(self.__datastream)
def set_datastream(self, ds):
'''
Method used to set or reset object by passing a new datastream name.
'''
self.__init__(ds)
@property
def datastream(self):
'''
Property returning current datastream name stored in object in
        standard lower case. Will return the datastream with no level if
unavailable.
'''
try:
return ''.join((self.__site, self.__class, self.__facility, '.',
self.__level))
except TypeError:
return None
@property
def site(self):
'''
Property returning current site name stored in object in standard
lower case.
'''
return self.__site
@property
def datastream_class(self):
'''
Property returning current datastream class name stored in object in
standard lower case. Could not use class as attribute name since it
is a reserved word in Python
'''
return self.__class
@property
def facility(self):
'''
Property returning current facility name stored in object in
standard upper case.
'''
try:
return self.__facility.upper()
except AttributeError:
return self.__facility
@property
def level(self):
'''
Property returning current data level stored in object in standard
lower case.
'''
return self.__level
@property
def datastream_standard(self):
'''
Property returning datastream name in ARM-standard format with
facility in caps. Will return the datastream name with no level if
unavailable.
'''
try:
return ''.join((self.site, self.datastream_class, self.facility,
'.', self.level))
except TypeError:
return None
@property
def date(self):
'''
Property returning date from filename.
'''
return self.__date
@property
def time(self):
'''
Property returning time from filename.
'''
return self.__time
@property
def ext(self):
'''
Property returning file extension from filename.
'''
return self.__ext
def assign_coordinates(ds, coord_list):
"""
This procedure will create a new ACT dataset whose coordinates are
designated to be the variables in a given list. This helps make data
slicing via xarray and visualization easier.
Parameters
----------
ds : ACT Dataset
The ACT Dataset to modify the coordinates of.
coord_list : dict
The list of variables to assign as coordinates, given as a dictionary
whose keys are the variable name and values are the dimension name.
Returns
-------
new_ds : ACT Dataset
The new ACT Dataset with the coordinates assigned to be the given
variables.
"""
# Check to make sure that user assigned valid entries for coordinates
for coord in coord_list.keys():
if coord not in ds.variables.keys():
raise KeyError(coord + ' is not a variable in the Dataset.')
if ds.dims[coord_list[coord]] != len(ds.variables[coord]):
raise IndexError(
coord + ' must have the same ' + 'value as length of ' + coord_list[coord]
)
new_ds_dict = {}
for variable in ds.variables.keys():
my_coord_dict = {}
dataarray = ds[variable]
if len(dataarray.dims) > 0:
for coord in coord_list.keys():
if coord_list[coord] in dataarray.dims:
my_coord_dict[coord_list[coord]] = ds[coord]
if variable not in my_coord_dict.keys() and variable not in ds.dims:
the_dataarray = xr.DataArray(dataarray.data, coords=my_coord_dict, dims=dataarray.dims)
new_ds_dict[variable] = the_dataarray
new_ds = xr.Dataset(new_ds_dict, coords=my_coord_dict)
return new_ds
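# Illustrative usage sketch for assign_coordinates (not part of the original
# module; the variable and dimension names are assumptions):
#
#   new_ds = assign_coordinates(ds, {'height': 'range'})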
def add_in_nan(time, data):
"""
This procedure adds in NaNs when there is a larger than expected time step.
This is useful for timeseries where there is a gap in data and need a
NaN value to stop plotting from connecting data over the large data gap.
Parameters
----------
time : 1D array of numpy datetime64 or Xarray DataArray of datetime64
Times in the timeseries.
data : 1D or 2D numpy array or Xarray DataArray
Array containing the data. The 0 axis corresponds to time.
Returns
-------
time : numpy array or Xarray DataArray
The array containing the new times including a NaN filled
        sample or slice if multi-dimensional.
        The intervals are determined by the mode of the timestep in *time*.
    data : numpy array or Xarray DataArray
        The array containing the NaN-inserted data.
"""
time_is_DataArray = False
data_is_DataArray = False
if isinstance(time, xr.core.dataarray.DataArray):
time_is_DataArray = True
time_attributes = time.attrs
time_dims = time.dims
if isinstance(data, xr.core.dataarray.DataArray):
data_is_DataArray = True
data_attributes = data.attrs
data_dims = data.dims
# Return if time dimension is only size one since we can't do differences.
if time.size > 2:
data = np.asarray(data)
time = np.asarray(time)
# Not sure if we need to set to second data type to make it work better.
# Leaving code in here in case we need to update.
# diff = np.diff(time.astype('datetime64[s]'), 1)
diff = np.diff(time, 1)
# Wrapping in a try to catch error while switching between numpy 1.10 to 1.11
try:
mode = stats.mode(diff, keepdims=True).mode[0]
except TypeError:
mode = stats.mode(diff).mode[0]
index = np.where(diff > (2.0 * mode))
offset = 0
for i in index[0]:
corr_i = i + offset
if len(data.shape) == 1:
# For line plotting adding a NaN will stop the connection of the line
# between points. So we just need to add a NaN anywhere between the points.
corr_i = i + offset
time_added = time[corr_i] + (time[corr_i + 1] - time[corr_i]) / 2.0
time = np.insert(time, corr_i + 1, time_added)
data = np.insert(data, corr_i + 1, np.nan, axis=0)
offset += 1
else:
# For 2D plots need to add a NaN right after and right before the data
# to correctly mitigate streaking with pcolormesh.
time_added_1 = time[corr_i] + 1 # One time step after
time_added_2 = time[corr_i + 1] - 1 # One time step before
time = np.insert(time, corr_i + 1, [time_added_1, time_added_2])
data = np.insert(data, corr_i + 1, np.nan, axis=0)
data = np.insert(data, corr_i + 2, np.nan, axis=0)
offset += 2
if time_is_DataArray:
time = xr.DataArray(time, attrs=time_attributes, dims=time_dims)
if data_is_DataArray:
data = xr.DataArray(data, attrs=data_attributes, dims=data_dims)
return time, data
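# Illustrative usage sketch for add_in_nan (not part of the original module):
#
#   time, data = add_in_nan(ds['time'], ds['temp_mean'])
#   # The returned arrays contain NaN-filled samples wherever the time step
#   # exceeds twice the typical (modal) spacing.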
def get_missing_value(
ds,
variable,
default=-9999,
add_if_missing_in_ds=False,
use_FillValue=False,
nodefault=False,
):
"""
Function to get missing value from missing_value or _FillValue attribute.
Works well with catching errors and allows for a default value when a
missing value is not listed in the dataset. You may get strange results
    because xarray will automatically convert all missing_value or
_FillValue to NaN and then remove the missing_value and
_FillValue variable attribute when reading data with default settings.
Parameters
----------
ds : xarray.Dataset
Xarray dataset containing data variable.
variable : str
Variable name to use for getting missing value.
default : int or float
Default value to use if missing value attribute is not in dataset.
add_if_missing_in_ds : bool
Boolean to add to the dataset if does not exist. Default is False.
use_FillValue : bool
Boolean to use _FillValue instead of missing_value. If missing_value
does exist and _FillValue does not will add _FillValue
set to missing_value value.
nodefault : bool
        Option to check whether the variable has a missing value set when you
        do not want the default returned. If the missing value is found
        it will be returned, else None will be returned.
Returns
-------
missing : scalar int or float (or None)
Value used to indicate missing value matching type of data or None if
nodefault keyword set to True.
Examples
--------
.. code-block:: python
from act.utils import get_missing_value
missing = get_missing_value(dq_ds, "temp_mean")
print(missing)
-9999.0
"""
in_ds = False
if use_FillValue:
missing_atts = ['_FillValue', 'missing_value']
else:
missing_atts = ['missing_value', '_FillValue']
for att in missing_atts:
try:
missing = ds[variable].attrs[att]
in_ds = True
break
except (AttributeError, KeyError):
missing = default
    # Check if a default value return is not wanted and a value
    # was not found.
if nodefault is True and in_ds is False:
missing = None
return missing
# Check data type and try to match missing_value to the data type of data
try:
missing = ds[variable].data.dtype.type(missing)
except KeyError:
pass
except AttributeError:
print(
('--- AttributeError: Issue trying to get data type ' + 'from "{}" data ---').format(
variable
)
)
# If requested add missing value to the dataset
if add_if_missing_in_ds and not in_ds:
try:
ds[variable].attrs[missing_atts[0]] = missing
except KeyError:
print(
('--- KeyError: Issue trying to add "{}" ' + 'attribute to "{}" ---').format(
missing_atts[0], variable
)
)
return missing
def convert_units(data, in_units, out_units):
"""
Wrapper function around library to convert data using unit strings.
Currently using pint units library. Will attempt to preserve numpy
data type, but will upconvert to numpy float64 if need to change
data type for converted values.
Parameters
----------
data : list, tuple or numpy array
Data array to be modified.
in_units : str
Units scalar string of input data array.
out_units : str
Units scalar string of desired output data array.
Returns
-------
data : numpy array
Data array converted into new units.
Examples
--------
> data = np.array([1,2,3,4,5,6])
> data = convert_units(data, 'cm', 'm')
> data
array([0.01, 0.02, 0.03, 0.04, 0.05, 0.06])
"""
# Fix historical and current incorrect usage of units.
convert_dict = {
'C': 'degC',
'F': 'degF',
'%': 'percent', # Pint does not like this symbol with .to('%')
'1': 'unitless', # Pint does not like a number
}
if in_units in convert_dict:
in_units = convert_dict[in_units]
if out_units in convert_dict:
out_units = convert_dict[out_units]
if in_units == out_units:
return data
# Instantiate the registry
ureg = pint.UnitRegistry(autoconvert_offset_to_baseunit=True)
# Add missing units and conversions
ureg.define('fraction = []')
ureg.define('unitless = []')
if not isinstance(data, np.ndarray):
data = np.array(data)
data_type = data.dtype
data_type_kind = data.dtype.kind
# Do the conversion magic
data = (data * ureg(in_units)).to(out_units)
data = data.magnitude
# The data type may be changed by pint. This is a side effect
# of pint changing the datatype to float. Check if the converted values
    # need float precision. If so leave, if not change back to original
    # precision after checking that precision is not lost with the original
    # data type.
if (
data_type_kind == 'i'
and np.nanmin(data) >= np.iinfo(data_type).min
and np.nanmax(data) <= np.iinfo(data_type).max
and np.all(np.mod(data, 1) == 0)
):
data = data.astype(data_type)
return data
def ts_weighted_average(ts_dict):
"""
    Program to take in multiple different time-series and average them
using the weights provided. This assumes that the variables passed in
all have the same units. Please see example gallery for an example.
NOTE: All weights should add up to 1
Parameters
----------
ts_dict : dict
Dictionary containing datastream, variable, weight, and datasets
.. code-block:: python
t_dict = {
"sgpvdisC1.b1": {
"variable": "rain_rate",
"weight": 0.05,
"ds": ds,
},
"sgpmetE13.b1": {
"variable": [
"tbrg_precip_total",
"org_precip_rate_mean",
"pwd_precip_rate_mean_1min",
],
"weight": [0.25, 0.05, 0.0125],
},
}
Returns
-------
data : numpy array
Variable of time-series averaged data
"""
# Run through each datastream/variable and get data
da_array = []
data = 0.0
for d in ts_dict:
for i, v in enumerate(ts_dict[d]['variable']):
new_name = '_'.join([d, v])
# Since many variables may have same name, rename with datastream
da = ts_dict[d]['ds'][v].rename(new_name)
# Apply Weights to Data
da.values = da.values * ts_dict[d]['weight'][i]
da_array.append(da)
da = xr.merge(da_array)
# Stack all the data into a 2D time series
data = None
for i, d in enumerate(da):
if i == 0:
data = da[d].values
else:
data = np.vstack((data, da[d].values))
# Sum data across each time sample
data = np.nansum(data, 0)
# Add data to data array and return
dims = ts_dict[list(ts_dict.keys())[0]]['ds'].dims
da_xr = xr.DataArray(
data,
dims=dims,
coords={'time': ts_dict[list(ts_dict.keys())[0]]['ds']['time']},
)
da_xr.attrs['long_name'] = 'Weighted average of ' + ', '.join(list(ts_dict.keys()))
return da_xr
def accumulate_precip(ds, variable, time_delta=None):
"""
Program to accumulate rain rates from an act xarray dataset and insert
variable back into an act xarray dataset with "_accumulated" appended to
the variable name. Please verify that your units are accurately described
in the data.
Parameters
----------
ds : xarray.DataSet
ACT Xarray dataset.
variable : string
Variable name.
time_delta : float
        Time delta to calculate precip accumulations over.
Useful if full time series is not passed in.
Returns
-------
ds : xarray.DataSet
ACT Xarray dataset with variable_accumulated.
"""
    # Get data, time, and metadata
data = ds[variable]
time = ds.coords['time']
units = ds[variable].attrs['units']
    # Calculate mode of the time samples (i.e. 1 min vs 1 sec)
if time_delta is None:
diff = np.diff(time.values, 1) / np.timedelta64(1, 's')
try:
t_delta = stats.mode(diff, keepdims=False).mode
except TypeError:
t_delta = stats.mode(diff).mode
else:
t_delta = time_delta
# Calculate the accumulation based on the units
t_factor = t_delta / 60.0
if units == 'mm/hr':
data = data * (t_factor / 60.0)
accum = np.nancumsum(data.values)
# Add accumulated variable back to the dataset
long_name = 'Accumulated precipitation'
attrs = {'long_name': long_name, 'units': 'mm'}
ds['_'.join([variable, 'accumulated'])] = xr.DataArray(
accum, coords=ds[variable].coords, attrs=attrs
)
return ds
def create_pyart_obj(
ds,
variables=None,
sweep=None,
azimuth=None,
elevation=None,
range_var=None,
sweep_start=None,
sweep_end=None,
lat=None,
lon=None,
alt=None,
sweep_mode='ppi',
sweep_az_thresh=10.0,
sweep_el_thresh=0.5,
):
"""
Produces a Py-ART radar object based on data in the ACT Xarray dataset.
Parameters
----------
ds : xarray.DataSet
ACT Xarray dataset.
variables : list
List of variables to add to the radar object, will default to all
variables.
sweep : string
Name of variable that has sweep information. If none, will try and
calculate from the azimuth and elevation.
azimuth : string
Name of azimuth variable. Will try and find one if none given.
elevation : string
Name of elevation variable. Will try and find one if none given.
range_var : string
Name of the range variable. Will try and find one if none given.
sweep_start : string
Name of variable with sweep start indices.
sweep_end : string
Name of variable with sweep end indices.
lat : string
Name of latitude variable. Will try and find one if none given.
lon : string
Name of longitude variable. Will try and find one if none given.
alt : string
Name of altitude variable. Will try and find one if none given.
sweep_mode : string
Type of scan. Defaults to PPI.
sweep_az_thresh : float
If calculating sweep numbers, the maximum change in azimuth before new
sweep.
sweep_el_thresh : float
If calculating sweep numbers, the maximum change in elevation before
new sweep.
Returns
-------
radar : radar.Radar
Py-ART Radar Object.
"""
if not PYART_AVAILABLE:
raise ImportError(
'Py-ART needs to be installed on your system to convert to ' 'Py-ART Object.'
)
else:
import pyart
# Get list of variables if none provided
if variables is None:
variables = list(ds.keys())
    # Determine the sweeps if not already in a variable
if sweep is None:
swp = np.zeros(ds.sizes['time'])
for key in ds.variables.keys():
if len(ds.variables[key].shape) == 2:
total_rays = ds.variables[key].shape[0]
break
nsweeps = int(total_rays / ds.variables['time'].shape[0])
else:
swp = ds[sweep].values
nsweeps = ds[sweep].values
# Get coordinate variables
if lat is None:
lat = [s for s in variables if 'latitude' in s]
if len(lat) == 0:
lat = [s for s in variables if 'lat' in s]
if len(lat) == 0:
raise ValueError(
'Latitude variable not set and could not be ' 'discerned from the data.'
)
else:
lat = lat[0]
if lon is None:
lon = [s for s in variables if 'longitude' in s]
if len(lon) == 0:
lon = [s for s in variables if 'lon' in s]
if len(lon) == 0:
raise ValueError(
'Longitude variable not set and could not be ' 'discerned from the data.'
)
else:
lon = lon[0]
if alt is None:
alt = [s for s in variables if 'altitude' in s]
if len(alt) == 0:
alt = [s for s in variables if 'alt' in s]
if len(alt) == 0:
raise ValueError(
'Altitude variable not set and could not be ' 'discerned from the data.'
)
else:
alt = alt[0]
# Get additional variable names if none provided
if azimuth is None:
azimuth = [s for s in sorted(variables) if 'azimuth' in s][0]
if len(azimuth) == 0:
raise ValueError(
'Azimuth variable not set and could not be ' 'discerned from the data.'
)
if elevation is None:
elevation = [s for s in sorted(variables) if 'elevation' in s][0]
if len(elevation) == 0:
raise ValueError(
'Elevation variable not set and could not be ' 'discerned from the data.'
)
if range_var is None:
range_var = [s for s in sorted(variables) if 'range' in s][0]
if len(range_var) == 0:
raise ValueError('Range variable not set and could not be ' 'discerned from the data.')
# Calculate the sweep indices if not passed in
if sweep_start is None and sweep_end is None:
az_diff = np.abs(np.diff(ds[azimuth].values))
az_idx = az_diff > sweep_az_thresh
el_diff = np.abs(np.diff(ds[elevation].values))
el_idx = el_diff > sweep_el_thresh
# Create index list
az_index = list(np.where(az_idx)[0] + 1)
el_index = list(np.where(el_idx)[0] + 1)
index = sorted(az_index + el_index)
index.insert(0, 0)
index += [ds.sizes['time']]
sweep_start_index = []
sweep_end_index = []
for i in range(len(index) - 1):
sweep_start_index.append(index[i])
sweep_end_index.append(index[i + 1] - 1)
swp[index[i] : index[i + 1]] = i
else:
sweep_start_index = ds[sweep_start].values
sweep_end_index = ds[sweep_end].values
if sweep is None:
for i in range(len(sweep_start_index)):
swp[sweep_start_index[i] : sweep_end_index[i]] = i
radar = pyart.testing.make_empty_ppi_radar(ds.sizes[range_var], ds.sizes['time'], nsweeps)
radar.time['data'] = np.array(ds['time'].values)
# Add lat, lon, and alt
radar.latitude['data'] = np.array(ds[lat].values)
radar.longitude['data'] = np.array(ds[lon].values)
radar.altitude['data'] = np.array(ds[alt].values)
# Add sweep information
radar.sweep_number['data'] = swp
radar.sweep_start_ray_index['data'] = sweep_start_index
radar.sweep_end_ray_index['data'] = sweep_end_index
radar.sweep_mode['data'] = np.array(sweep_mode)
radar.scan_type = sweep_mode
# Add elevation, azimuth, etc...
radar.azimuth['data'] = np.array(ds[azimuth])
radar.elevation['data'] = np.array(ds[elevation])
radar.fixed_angle['data'] = np.array(ds[elevation].values[0])
radar.range['data'] = np.array(ds[range_var].values)
# Calculate radar points in lat/lon
radar.init_gate_altitude()
radar.init_gate_longitude_latitude()
# Add the fields to the radar object
fields = {}
for v in variables:
ref_dict = pyart.config.get_metadata(v)
ref_dict['data'] = np.array(ds[v].values)
fields[v] = ref_dict
radar.fields = fields
return radar
def convert_to_potential_temp(
ds=None,
temp_var_name=None,
press_var_name=None,
temperature=None,
pressure=None,
temp_var_units=None,
press_var_units=None,
):
"""
Converts temperature to potential temperature.
Parameters
----------
ds : xarray.DataSet
ACT Xarray dataset
temp_var_name : str
Temperature variable name in the ACT Xarray dataset containing
temperature data to convert.
press_var_name : str
Pressure variable name in the ACT Xarray dataset containing the
pressure data to use in conversion. If not set or set to None will
use values from pressure keyword.
pressure : int, float, numpy array
Optional pressure values to use instead of using values from xarray
dataset. If set must also set press_var_units keyword.
temp_var_units : string
Pint recognized units string for temperature data. If set to None will
use the units attribute under temperature variable in ds.
press_var_units : string
Pint recognized units string for pressure data. If set to None will
use the units attribute under pressure variable in the dataset. If using
the pressure keyword this must be set.
Returns
-------
potential_temperature : None, int, float, numpy array
The converted temperature to potential temperature or None if something
goes wrong.
References
----------
May, R. M., Arms, S. C., Marsh, P., Bruning, E., Leeman, J. R., Goebbert,
K., Thielen, J. E., and Bruick, Z., 2021: MetPy: A Python Package for
Meteorological Data. Unidata, https://github.com/Unidata/MetPy,
doi:10.5065/D6WW7G29.
"""
potential_temp = None
if temp_var_units is None and temp_var_name is not None:
temp_var_units = ds[temp_var_name].attrs['units']
if press_var_units is None and press_var_name is not None:
press_var_units = ds[press_var_name].attrs['units']
if press_var_units is None:
raise ValueError(
"Need to provide 'press_var_units' keyword " "when using 'pressure' keyword"
)
if temp_var_units is None:
raise ValueError(
"Need to provide 'temp_var_units' keyword " "when using 'temperature' keyword"
)
if temperature is not None:
temperature = metpy.units.units.Quantity(temperature, temp_var_units)
else:
temperature = metpy.units.units.Quantity(ds[temp_var_name].values, temp_var_units)
if pressure is not None:
pressure = metpy.units.units.Quantity(pressure, press_var_units)
else:
pressure = metpy.units.units.Quantity(ds[press_var_name].values, press_var_units)
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=RuntimeWarning)
potential_temp = metpy.calc.potential_temperature(pressure, temperature)
potential_temp = potential_temp.to(temp_var_units).magnitude
return potential_temp
def height_adjusted_temperature(
ds=None,
temp_var_name=None,
height_difference=0,
height_units='m',
press_var_name=None,
temperature=None,
temp_var_units=None,
pressure=101.325,
press_var_units='kPa',
):
"""
Converts temperature for change in height.
Parameters
----------
ds : xarray.DataSet, None
Optional Xarray dataset for retrieving pressure and temperature values.
Not needed if using temperature keyword.
temp_var_name : str, None
Optional temperature variable name in the Xarray dataset containing the
temperature data to use in conversion. If not set or set to None will
use values from temperature keyword.
height_difference : int, float
Required difference in height to adjust pressure values. Positive
values to increase height negative values to decrease height.
height_units : str
Units of height value.
press_var_name : str, None
Optional pressure variable name in the Xarray dataset containing the
pressure data to use in conversion. If not set or set to None will
use values from pressure keyword.
temperature : int, float, numpy array, None
Optional temperature values to use instead of values in the dataset.
temp_var_units : str, None
Pint recognized units string for temperature data. If set to None will
use the units attribute under temperature variable in the dataset.
If using the temperature keyword this must be set.
pressure : int, float, numpy array, None
Optional pressure values to use instead of values in the dataset.
Default value of sea level pressure is set for ease of use.
press_var_units : str, None
Pint recognized units string for pressure data. If set to None will
use the units attribute under pressure variable in the dataset.
If using the pressure keyword this must be set. Default value of
sea level pressure is set for ease of use.
Returns
-------
adjusted_temperature : None, int, float, numpy array
The height adjusted temperature or None if something goes wrong.
References
----------
May, R. M., Arms, S. C., Marsh, P., Bruning, E., Leeman, J. R., Goebbert,
K., Thielen, J. E., and Bruick, Z., 2021: MetPy: A Python Package for
Meteorological Data. Unidata, https://github.com/Unidata/MetPy,
doi:10.5065/D6WW7G29.
"""
adjusted_temperature = None
if temp_var_units is None and temperature is None:
temp_var_units = ds[temp_var_name].attrs['units']
if temp_var_units is None:
raise ValueError(
"Need to provide 'temp_var_units' keyword when " 'providing temperature keyword values.'
)
if temperature is not None:
temperature = metpy.units.units.Quantity(temperature, temp_var_units)
else:
temperature = metpy.units.units.Quantity(ds[temp_var_name].values, temp_var_units)
if press_var_name is not None:
pressure = metpy.units.units.Quantity(ds[press_var_name].values, press_var_units)
else:
pressure = metpy.units.units.Quantity(pressure, press_var_units)
adjusted_pressure = height_adjusted_pressure(
height_difference=height_difference,
height_units=height_units,
pressure=pressure.magnitude,
press_var_units=press_var_units,
)
adjusted_pressure = metpy.units.units.Quantity(adjusted_pressure, press_var_units)
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=RuntimeWarning)
adjusted_temperature = metpy.calc.dry_lapse(adjusted_pressure, temperature, pressure)
adjusted_temperature = adjusted_temperature.to(temp_var_units).magnitude
return adjusted_temperature
def height_adjusted_pressure(
ds=None,
press_var_name=None,
height_difference=0,
height_units='m',
pressure=None,
press_var_units=None,
):
"""
Converts pressure for change in height.
Parameters
----------
ds : xarray.DataSet, None
Optional Xarray dataset for retrieving pressure values. Not needed if
using pressure keyword.
press_var_name : str, None
Optional pressure variable name in the Xarray dataset containing the
pressure data to use in conversion. If not set or set to None will
use values from pressure keyword.
height_difference : int, float
Required difference in height to adjust pressure values. Positive
values to increase height negative values to decrease height.
height_units : str
Units of height value.
pressure : int, float, numpy array, None
Optional pressure values to use instead of values in the dataset.
press_var_units : str, None
Pint recognized units string for pressure data. If set to None will
use the units attribute under pressure variable in the dataset.
If using the pressure keyword this must be set.
Returns
-------
adjusted_pressure : None, int, float, numpy array
The height adjusted pressure or None if something goes wrong.
References
----------
May, R. M., Arms, S. C., Marsh, P., Bruning, E., Leeman, J. R., Goebbert,
K., Thielen, J. E., and Bruick, Z., 2021: MetPy: A Python Package for
Meteorological Data. Unidata, https://github.com/Unidata/MetPy,
doi:10.5065/D6WW7G29.
"""
adjusted_pressure = None
if press_var_units is None and pressure is None:
press_var_units = ds[press_var_name].attrs['units']
if press_var_units is None:
raise ValueError(
"Need to provide 'press_var_units' keyword when " 'providing pressure keyword values.'
)
if pressure is not None:
pressure = metpy.units.units.Quantity(pressure, press_var_units)
else:
pressure = metpy.units.units.Quantity(ds[press_var_name].values, press_var_units)
height_difference = metpy.units.units.Quantity(height_difference, height_units)
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=RuntimeWarning)
adjusted_pressure = metpy.calc.add_height_to_pressure(pressure, height_difference)
adjusted_pressure = adjusted_pressure.to(press_var_units).magnitude
return adjusted_pressure
def arm_site_location_search(site_code='sgp', facility_code=None):
"""
Parameters
----------
site_code : str
ARM site code to retrieve facilities and coordinate information. Example and default
is 'sgp'.
facility_code : str or None
Facility code or codes for the ARM site provided. If None is provided, all facilities are returned.
Example string for multiple facilities is 'A4,I5'.
Returns
-------
coord_dict : dict
A dictionary containing the facility chosen coordinate information or all facilities
if None for facility_code and their respective coordinates.
"""
headers = {
'Content-Type': 'application/json',
}
# Return all facilities if facility_code is None else set the query to include
# facility search
if facility_code is None:
query = "site_code:" + site_code
else:
query = "site_code:" + site_code + " AND facility_code:" + facility_code
# Search aggregation for elastic search
json_data = {
"aggs": {
"distinct_facility_code": {
"terms": {
"field": "facility_code.keyword",
"order": {
"_key": "asc"
},
"size": 7000,
},
"aggs": {
"hits": {
"top_hits": {
"_source": [
"site_type",
"site_code",
"facility_code",
"location",
],
"size": 1
},
},
},
},
},
"size": 0,
"query": {
"query_string": {
"query": query,
},
},
}
# Uses requests to grab metadata from arm.gov.
response = requests.get('https://adc.arm.gov/elastic/metadata/_search', headers=headers, json=json_data)
# Loads the text to a dictionary
response_dict = json.loads(response.text)
# Searches dictionary for the site, facility and coordinate information.
coord_dict = {}
# Loop through each facility.
for i in range(len(response_dict['aggregations']['distinct_facility_code']['buckets'])):
site_info = response_dict['aggregations']['distinct_facility_code']['buckets'][i]['hits']['hits']['hits'][0]['_source']
site = site_info['site_code']
facility = site_info['facility_code']
# Some sites do not contain coordinate information, return None if that is the case.
if site_info['location'] is None:
coords = {'latitude': None,
'longitude': None}
else:
lat, lon = site_info['location'].split(',')
lat = float(lat)
lon = float(lon)
coords = {'latitude': lat,
'longitude': lon}
coord_dict.setdefault(site + ' ' + facility, coords)
return coord_dict
<|code_end|>
|
Adding plotting options to Skew-T
Currently the Skew-T plotting method has options to include a surface-based parcel trace and shading for CAPE and CIN values. MetPy has some additional Skew-T plotting features that might be useful:
- `plot_dry_adiabats` - Plots dry adiabat/theta lines
- `plot_mixing_lines` - Plots mixing ratio lines
- `plot_moist_adiabats` - Plots moist adiabats
Would these be useful to add to the skew-t plotting methods in ACT?
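For reference, MetPy exposes these as methods on its `SkewT` object, which ACT already wraps in `SkewTDisplay.SkewT`. A rough sketch of what the underlying calls would look like (hypothetical usage only; how ACT would expose them, e.g. via new keywords on `plot_from_u_and_v`, is up for discussion):

```python
# Hypothetical sketch; not the current ACT API.
skewt = act.plotting.SkewTDisplay(sonde_ds)
skewt.plot_from_u_and_v('u_wind', 'v_wind', 'pres', 'tdry', 'dp')

# Underlying MetPy calls the new options would wrap:
skewt.SkewT[0].plot_dry_adiabats()    # dry adiabats / theta lines
skewt.SkewT[0].plot_moist_adiabats()  # moist adiabats
skewt.SkewT[0].plot_mixing_lines()    # mixing ratio lines
```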
|
act/plotting/skewtdisplay.py
<|code_start|>"""
Stores the class for SkewTDisplay.
"""
import warnings
from copy import deepcopy
import matplotlib.pyplot as plt
# Import third party libraries
import metpy
import metpy.calc as mpcalc
import numpy as np
import scipy
from metpy.plots import Hodograph, SkewT
from metpy.units import units
from ..retrievals import calculate_stability_indicies
# Import Local Libs
from ..utils import datetime_utils as dt_utils
from .plot import Display
class SkewTDisplay(Display):
"""
A class for making Skew-T plots.
This is inherited from the :func:`act.plotting.Display`
    class and therefore has the same attributes as that class.
See :func:`act.plotting.Display`
for more information. There are no additional attributes or parameters
to this class.
In order to create Skew-T plots, ACT needs the MetPy package to be
    installed on your system. More information about
    MetPy can be found here: https://unidata.github.io/MetPy/latest/index.html.
Examples
--------
Here is an example of how to make a Skew-T plot using ACT:
.. code-block :: python
sonde_ds = act.io.arm.read_arm_netcdf(
act.tests.sample_files.EXAMPLE_SONDE1)
skewt = act.plotting.SkewTDisplay(sonde_ds)
skewt.plot_from_u_and_v('u_wind', 'v_wind', 'pres', 'tdry', 'dp')
plt.show()
"""
def __init__(self, ds, subplot_shape=(1,), subplot=None, ds_name=None, set_fig=None, **kwargs):
# We want to use our routine to handle subplot adding, not the main
# one
new_kwargs = kwargs.copy()
super().__init__(ds, None, ds_name, subplot_kw=dict(projection='skewx'),
**new_kwargs)
# Make a SkewT object for each subplot
self.add_subplots(subplot_shape, set_fig=set_fig, subplot=subplot, **kwargs)
def add_subplots(self, subplot_shape=(1,), set_fig=None, subplot=None, **kwargs):
"""
Adds subplots to the Display object. The current
figure in the object will be deleted and overwritten.
Parameters
----------
subplot_shape : 1 or 2D tuple, list, or array
The structure of the subplots in (rows, cols).
subplot_kw : dict, optional
The kwargs to pass into fig.subplots.
set_fig : matplotlib figure, optional
Figure to pass to SkewT
**kwargs : keyword arguments
Any other keyword arguments that will be passed
into :func:`matplotlib.pyplot.figure` when the figure
is made. The figure is only made if the *fig*
property is None. See the matplotlib
documentation for further details on what keyword
arguments are available.
"""
del self.axes
if self.fig is None and set_fig is None:
self.fig = plt.figure(**kwargs)
if set_fig is not None:
self.fig = set_fig
self.SkewT = np.empty(shape=subplot_shape, dtype=SkewT)
self.axes = np.empty(shape=subplot_shape, dtype=plt.Axes)
if len(subplot_shape) == 1:
for i in range(subplot_shape[0]):
if subplot is None:
subplot_tuple = (subplot_shape[0], 1, i + 1)
else:
subplot_tuple = subplot
self.SkewT[i] = SkewT(fig=self.fig, subplot=subplot_tuple)
self.axes[i] = self.SkewT[i].ax
elif len(subplot_shape) == 2:
for i in range(subplot_shape[0]):
for j in range(subplot_shape[1]):
subplot_tuple = (
subplot_shape[0],
subplot_shape[1],
i * subplot_shape[1] + j + 1,
)
self.SkewT[i, j] = SkewT(fig=self.fig, subplot=subplot_tuple)
self.axes[i, j] = self.SkewT[i, j].ax
else:
raise ValueError('Subplot shape must be 1 or 2D!')
def set_xrng(self, xrng, subplot_index=(0,)):
"""
Sets the x range of the plot.
Parameters
----------
xrng : 2 number array.
The x limits of the plot.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
"""
if self.axes is None:
raise RuntimeError('set_xrng requires the plot to be displayed.')
if not hasattr(self, 'xrng') or np.all(self.xrng == 0):
if len(self.axes.shape) == 2:
self.xrng = np.zeros((self.axes.shape[0], self.axes.shape[1], 2))
else:
self.xrng = np.zeros((self.axes.shape[0], 2))
self.axes[subplot_index].set_xlim(xrng)
self.xrng[subplot_index, :] = np.array(xrng)
def set_yrng(self, yrng, subplot_index=(0,)):
"""
Sets the y range of the plot.
Parameters
----------
yrng : 2 number array
The y limits of the plot.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
"""
if self.axes is None:
raise RuntimeError('set_yrng requires the plot to be displayed.')
if not hasattr(self, 'yrng') or np.all(self.yrng == 0):
if len(self.axes.shape) == 2:
self.yrng = np.zeros((self.axes.shape[0], self.axes.shape[1], 2))
else:
self.yrng = np.zeros((self.axes.shape[0], 2))
if not hasattr(self, 'yrng') and len(self.axes.shape) == 2:
self.yrng = np.zeros((self.axes.shape[0], self.axes.shape[1], 2))
elif not hasattr(self, 'yrng') and len(self.axes.shape) == 1:
self.yrng = np.zeros((self.axes.shape[0], 2))
if yrng[0] == yrng[1]:
yrng[1] = yrng[1] + 1
self.axes[subplot_index].set_ylim(yrng)
self.yrng[subplot_index, :] = yrng
def plot_from_spd_and_dir(
self, spd_field, dir_field, p_field, t_field, td_field, dsname=None, **kwargs
):
"""
This plot will make a sounding plot from wind data that is given
in speed and direction.
Parameters
----------
spd_field : str
The name of the field corresponding to the wind speed.
dir_field : str
The name of the field corresponding to the wind direction
in degrees from North.
p_field : str
The name of the field containing the atmospheric pressure.
t_field : str
The name of the field containing the atmospheric temperature.
td_field : str
The name of the field containing the dewpoint.
dsname : str or None
The name of the datastream to plot. Set to None to make ACT
attempt to automatically determine this.
kwargs : dict
Additional keyword arguments will be passed into
:func:`act.plotting.SkewTDisplay.plot_from_u_and_v`
Returns
-------
ax : matplotlib axis handle
The matplotlib axis handle corresponding to the plot.
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
# Make temporary field called tempu, tempv
spd = self._ds[dsname][spd_field].values * units(self._ds[dsname][spd_field].attrs['units'])
dir = self._ds[dsname][dir_field].values * units(self._ds[dsname][dir_field].attrs['units'])
tempu, tempv = mpcalc.wind_components(spd, dir)
self._ds[dsname]['temp_u'] = deepcopy(self._ds[dsname][spd_field])
self._ds[dsname]['temp_v'] = deepcopy(self._ds[dsname][spd_field])
self._ds[dsname]['temp_u'].values = tempu
self._ds[dsname]['temp_v'].values = tempv
the_ax = self.plot_from_u_and_v(
'temp_u', 'temp_v', p_field, t_field, td_field, dsname, **kwargs
)
del self._ds[dsname]['temp_u'], self._ds[dsname]['temp_v']
return the_ax
def plot_from_u_and_v(
self,
u_field,
v_field,
p_field,
t_field,
td_field,
dsname=None,
subplot_index=(0,),
p_levels_to_plot=None,
show_parcel=True,
shade_cape=True,
shade_cin=True,
set_title=None,
smooth_p=3,
plot_barbs_kwargs=dict(),
plot_kwargs=dict(),
):
"""
This function will plot a Skew-T from a sounding dataset. The wind
data must be given in u and v.
Parameters
----------
u_field : str
The name of the field containing the u component of the wind.
v_field : str
The name of the field containing the v component of the wind.
p_field : str
The name of the field containing the pressure.
t_field : str
The name of the field containing the temperature.
td_field : str
The name of the field containing the dewpoint temperature.
dsname : str or None
The name of the datastream to plot. Set to None to make ACT
attempt to automatically determine this.
subplot_index : tuple
The index of the subplot to make the plot on.
p_levels_to_plot : 1D array
The pressure levels to plot the wind barbs on. Set to None
to have ACT use neatly spaced defaults of
25, 50, 75, 100, 150, 200, 250, 300, 400, 500, 600, 700, 750, 800,
850, 900, 950, and 1000 hPa.
show_parcel : bool
Set to true to calculate the profile a parcel takes through the atmosphere
using the metpy.calc.parcel_profile function. From their documentation,
the parcel starts at the surface temperature and dewpoint, is lifted up
dry adiabatically to the LCL and then moist adiabatically from there.
shade_cape : bool
Set to True to shade the CAPE red.
shade_cin : bool
Set to True to shade the CIN blue.
set_title : None or str
The title of the plot is set to this. Set to None to use
a default title.
smooth_p : int
If pressure is not in descending order, will smooth the data
using this many points to try and work around the issue.
Default is 3, but in the PBL retrieval code we have to default to 5 at times
plot_barbs_kwargs : dict
Additional keyword arguments to pass into MetPy's
SkewT.plot_barbs.
plot_kwargs : dict
Additional keyword arguments to pass into MetPy's
SkewT.plot.
Returns
-------
ax : matplotlib axis handle
The axis handle to the plot.
References
----------
May, R. M., Arms, S. C., Marsh, P., Bruning, E., Leeman, J. R., Goebbert, K., Thielen, J. E.,
Bruick, Z., and Camron, M. D., 2023: MetPy: A Python Package for Meteorological Data.
Unidata, Unidata/MetPy, doi:10.5065/D6WW7G29.
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
if p_levels_to_plot is None:
p_levels_to_plot = np.array(
[
25.0,
50.0,
75.0,
100.0,
150.0,
200.0,
250.0,
300.0,
400.0,
500.0,
600.0,
700.0,
750.0,
800.0,
850.0,
900.0,
950.0,
1000.0,
]
) * units('hPa')
# Get pressure and smooth if not in order
p = self._ds[dsname][p_field]
if not all(p[i] <= p[i + 1] for i in range(len(p) - 1)):
if 'time' in self._ds:
self._ds[dsname][p_field] = (
self._ds[dsname][p_field].rolling(time=smooth_p, min_periods=1, center=True).mean()
)
p = self._ds[dsname][p_field]
p_units = self._ds[dsname][p_field].attrs['units']
p = p.values * getattr(units, p_units)
if len(np.shape(p)) == 2:
p = np.reshape(p, p.shape[0] * p.shape[1])
T = self._ds[dsname][t_field]
T_units = self._ds[dsname][t_field].attrs['units']
if T_units == 'C':
T_units = 'degC'
T = T.values * getattr(units, T_units)
if len(np.shape(T)) == 2:
T = np.reshape(T, T.shape[0] * T.shape[1])
Td = self._ds[dsname][td_field]
Td_units = self._ds[dsname][td_field].attrs['units']
if Td_units == 'C':
Td_units = 'degC'
Td = Td.values * getattr(units, Td_units)
if len(np.shape(Td)) == 2:
Td = np.reshape(Td, Td.shape[0] * Td.shape[1])
u = self._ds[dsname][u_field]
u_units = self._ds[dsname][u_field].attrs['units']
u = u.values * getattr(units, u_units)
if len(np.shape(u)) == 2:
u = np.reshape(u, u.shape[0] * u.shape[1])
v = self._ds[dsname][v_field]
v_units = self._ds[dsname][v_field].attrs['units']
v = v.values * getattr(units, v_units)
if len(np.shape(v)) == 2:
v = np.reshape(v, v.shape[0] * v.shape[1])
u_red = np.zeros_like(p_levels_to_plot) * getattr(units, u_units)
v_red = np.zeros_like(p_levels_to_plot) * getattr(units, v_units)
# Check p_levels_to_plot units, and convert to p units if needed
if not hasattr(p_levels_to_plot, 'units'):
p_levels_to_plot = p_levels_to_plot * getattr(units, p_units)
else:
p_levels_to_plot = p_levels_to_plot.to(p_units)
for i in range(len(p_levels_to_plot)):
index = np.argmin(np.abs(p_levels_to_plot[i] - p))
u_red[i] = u[index].magnitude * getattr(units, u_units)
v_red[i] = v[index].magnitude * getattr(units, v_units)
self.SkewT[subplot_index].plot(p, T, 'r', **plot_kwargs)
self.SkewT[subplot_index].plot(p, Td, 'g', **plot_kwargs)
self.SkewT[subplot_index].plot_barbs(
p_levels_to_plot.magnitude, u_red, v_red, **plot_barbs_kwargs
)
# Metpy fix if Pressure does not decrease monotonically in
# your sounding.
try:
prof = mpcalc.parcel_profile(p, T[0], Td[0]).to('degC')
except metpy.calc.exceptions.InvalidSoundingError:
p = scipy.ndimage.median_filter(p, 3, output=float)
p = metpy.units.units.Quantity(p, p_units)
prof = mpcalc.parcel_profile(p, T[0], Td[0]).to('degC')
if show_parcel:
# Only plot where prof > T
lcl_pressure, lcl_temperature = mpcalc.lcl(p[0], T[0], Td[0])
self.SkewT[subplot_index].plot(
lcl_pressure, lcl_temperature, 'ko', markerfacecolor='black', **plot_kwargs
)
self.SkewT[subplot_index].plot(p, prof, 'k', linewidth=2, **plot_kwargs)
if shade_cape:
self.SkewT[subplot_index].shade_cape(p, T, prof, linewidth=2)
if shade_cin:
self.SkewT[subplot_index].shade_cin(p, T, prof, linewidth=2)
# Set Title
if set_title is None:
if 'time' in self._ds[dsname]:
title_time = dt_utils.numpy_to_arm_date(self._ds[dsname].time.values[0]),
elif '_file_dates' in self._ds[dsname].attrs:
title_time = self._ds[dsname].attrs['_file_dates'][0]
else:
title_time = ''
title_list = [dsname, 'on', title_time]
set_title = ' '.join(' '.join(x) for x in title_list)
self.axes[subplot_index].set_title(set_title)
# Set Y Limit
our_data = p.magnitude
if np.isfinite(our_data).any():
yrng = [np.nanmax(our_data), np.nanmin(our_data)]
else:
yrng = [1000.0, 100.0]
self.set_yrng(yrng, subplot_index)
# Set X Limit
xrng = [np.nanmin(T.magnitude) - 10.0, np.nanmax(T.magnitude) + 10.0]
self.set_xrng(xrng, subplot_index)
return self.axes[subplot_index]
def plot_hodograph(
self,
spd_field,
dir_field,
color_field=None,
set_fig=None,
set_axes=None,
component_range=80,
dsname=None,
uv_flag=False,
):
"""
This will plot a hodograph from the radiosonde wind data using
MetPy
Parameters
----------
spd_field : str
The name of the field corresponding to the wind speed.
dir_field : str
The name of the field corresponding to the wind direction
in degrees from North.
color_field : str, optional
The name of the field if wanting to shade by another variable
set_fig : matplotlib figure, optional
The figure to plot on
set_axes : matplotlib axes, optional
The specific axes to plot on
component_range : int
Range of the hodograph. Default is 80
dsname : str
Name of the datastream to plot if multiple in the plot object
uv_flag : boolean
If set to True, spd_field and dir_field will be treated as the
U and V wind variable names
Returns
-------
self.axes : matplotlib axes
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
# Get the current plotting axis
if set_fig is not None:
self.fig = set_fig
if set_axes is not None:
self.axes = set_axes
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
# Calculate u/v wind components from speed/direction
if uv_flag is False:
spd = self._ds[dsname][spd_field].values * units(
self._ds[dsname][spd_field].attrs['units']
)
dir = self._ds[dsname][dir_field].values * units(
self._ds[dsname][dir_field].attrs['units']
)
u, v = mpcalc.wind_components(spd, dir)
else:
u = self._ds[dsname][spd_field].values * units(
self._ds[dsname][spd_field].attrs['units']
)
v = self._ds[dsname][dir_field].values * units(
self._ds[dsname][dir_field].attrs['units']
)
# Plot out the data using the Hodograph method
h = Hodograph(self.axes, component_range=component_range)
h.add_grid(increment=20)
if color_field is None:
h.plot(u, v)
else:
data = self._ds[dsname][color_field].values * units(
self._ds[dsname][color_field].attrs['units']
)
h.plot_colormapped(u, v, data)
return self.axes
def add_stability_info(
self,
temp_name='tdry',
td_name='dp',
p_name='pres',
overwrite_data=None,
add_data=None,
set_fig=None,
set_axes=None,
dsname=None,
):
"""
This will add stability indices text to the plot, either calculated
with MetPy or supplied through the overwrite_data and add_data keywords.
Parameters
----------
temp_name : str
The name of the temperature field.
td_name : str
The name of the dewpoint field.
p_name : str
The name of the pressure field.
overwrite_data : dict
A dictionary of variables/values to write out instead
of the ones calculated by MetPy. Needs to be of the form
.. code-block:: python
overwrite_data={'LCL': 234, 'CAPE': 25}
...
add_data : dict
A dictionary of variables and values to write out in
addition to the MetPy calculated ones
set_fig : matplotlib figure, optional
The figure to plot on
set_axes : matplotlib axes, optional
The specific axes to plot on
dsname : str
Name of the datastream to plot if multiple in the plot object
Returns
-------
self.axes : matplotlib axes
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
# Get the current plotting axis
if set_fig is not None:
self.fig = set_fig
if set_axes is not None:
self.axes = set_axes
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
self.axes.spines['top'].set_visible(False)
self.axes.spines['right'].set_visible(False)
self.axes.spines['bottom'].set_visible(False)
self.axes.spines['left'].set_visible(False)
self.axes.get_xaxis().set_ticks([])
self.axes.get_yaxis().set_ticks([])
ct = 0
if overwrite_data is None:
# Calculate stability indicies
ds_sonde = calculate_stability_indicies(
self._ds[dsname],
temp_name=temp_name,
td_name=td_name,
p_name=p_name,
)
# Add MetPy calculated variables to the list
variables = {
'lifted_index': 'Lifted Index',
'surface_based_cape': 'SBCAPE',
'surface_based_cin': 'SBCIN',
'most_unstable_cape': 'MUCAPE',
'most_unstable_cin': 'MUCIN',
'lifted_condensation_level_temperature': 'LCL Temp',
'lifted_condensation_level_pressure': 'LCL Pres',
}
for i, v in enumerate(variables):
var_string = str(np.round(ds_sonde[v].values, 2))
self.axes.text(
-0.05,
(0.98 - (0.1 * i)),
variables[v] + ': ',
transform=self.axes.transAxes,
fontsize=10,
verticalalignment='top',
)
self.axes.text(
0.95,
(0.98 - (0.1 * i)),
var_string,
transform=self.axes.transAxes,
fontsize=10,
verticalalignment='top',
horizontalalignment='right',
)
ct += 1
else:
# If overwrite_data is set, the user passes in their own dictionary
for i, v in enumerate(overwrite_data):
var_string = str(np.round(overwrite_data[v], 2))
self.axes.text(
-0.05,
(0.98 - (0.1 * i)),
v + ': ',
transform=self.axes.transAxes,
fontsize=10,
verticalalignment='top',
)
self.axes.text(
0.95,
(0.98 - (0.1 * i)),
var_string,
transform=self.axes.transAxes,
fontsize=10,
verticalalignment='top',
horizontalalignment='right',
)
# User can also add variables to the existing ones calculated by MetPy
if add_data is not None:
for i, v in enumerate(add_data):
var_string = str(np.round(add_data[v], 2))
self.axes.text(
-0.05,
(0.98 - (0.1 * (i + ct))),
v + ': ',
transform=self.axes.transAxes,
fontsize=10,
verticalalignment='top',
)
self.axes.text(
0.95,
(0.98 - (0.1 * (i + ct))),
var_string,
transform=self.axes.transAxes,
fontsize=10,
verticalalignment='top',
horizontalalignment='right',
)
return self.axes
def plot_enhanced_skewt(
self,
spd_name='wspd',
dir_name='deg',
temp_name='tdry',
td_name='dp',
p_name='pres',
overwrite_data=None,
add_data=None,
color_field=None,
component_range=80,
uv_flag=False,
dsname=None,
figsize=(14, 10),
layout='constrained',
):
"""
This will plot an enhanced Skew-T plot with a Hodograph on the top right
and the stability parameters on the lower right. This will create a new
figure so that one does not need to be defined through subplot_shape.
Requires Matplotlib v 3.7 and higher
Parameters
----------
spd_name : str
The name of the field corresponding to the wind speed.
dir_name : str
The name of the field corresponding to the wind direction
in degrees from North.
temp_name : str
The name of the temperature field.
td_name : str
The name of the dewpoint field.
p_name : str
The name of the pressure field.
overwrite_data : dict
A dictionary of variables/values to write out instead
of the ones calculated by MetPy. Needs to be of the form
.. code-block:: python
overwrite_data={'LCL': 234, 'CAPE': 25}
...
add_data : dict
A dictionary of variables and values to write out in
addition to the MetPy calculated ones
color_field : str, optional
The name of the field if wanting to shade by another variable
component_range : int
Range of the hodograph. Default is 80
uv_flag : boolean
If set to True, spd_field and dir_field will be treated as the
U and V wind variable names
dsname : str
Name of the datastream to plot if multiple in the plot object
figsize : tuple
Figure size for the plot
layout : str
String to pass to matplotlib.figure.Figure object layout keyword
argument. Choice of 'constrained,' 'compressed,' 'tight,' or None.
Default is 'constrained'.
Returns
-------
self.axes : matplotlib axes
"""
# Set up the figure and axes
# Close existing figure as a new one will be created
plt.close('all')
subplot_kw = {'a': {'projection': 'skewx'}}
fig, axs = plt.subplot_mosaic(
[['a', 'a', 'b'], ['a', 'a', 'b'], ['a', 'a', 'c'], ['a', 'a', 'c']],
layout=layout,
per_subplot_kw=subplot_kw,
)
self.fig = fig
self.axes = axs
# Plot out the Skew-T
display = SkewTDisplay(self._ds, set_fig=fig, subplot=axs['a'], figsize=figsize)
if uv_flag is True:
display.plot_from_u_and_v(spd_name, dir_name, p_name, temp_name, td_name)
else:
display.plot_from_spd_and_dir(spd_name, dir_name, p_name, temp_name, td_name)
# Plot the hodograph
display.plot_hodograph(
spd_name,
dir_name,
set_axes=axs['b'],
color_field=color_field,
component_range=component_range,
dsname=dsname,
uv_flag=uv_flag,
)
# Add Stability information
display.add_stability_info(
set_axes=axs['c'],
temp_name=temp_name,
td_name=td_name,
p_name=p_name,
overwrite_data=overwrite_data,
add_data=add_data,
dsname=dsname,
)
return self.axes
<|code_end|>
examples/plotting/plot_skewt.py
<|code_start|>"""
Skew-T plot of a sounding
-------------------------
This example shows how to make a Skew-T plot from a sounding
and calculate stability indicies.
"""
from arm_test_data import DATASETS
import metpy
import xarray as xr
from matplotlib import pyplot as plt
import act
# Make sure attributes are retained
xr.set_options(keep_attrs=True)
# Read data
filename_sonde = DATASETS.fetch('sgpsondewnpnC1.b1.20190101.053200.cdf')
sonde_ds = act.io.arm.read_arm_netcdf(filename_sonde)
print(list(sonde_ds))
# Calculate stability indicies
sonde_ds = act.retrievals.calculate_stability_indicies(
sonde_ds, temp_name='tdry', td_name='dp', p_name='pres'
)
print(sonde_ds['lifted_index'])
# Set up plot
skewt = act.plotting.SkewTDisplay(sonde_ds, figsize=(15, 10))
# Add data
skewt.plot_from_u_and_v('u_wind', 'v_wind', 'pres', 'tdry', 'dp')
sonde_ds.close()
plt.show()
<|code_end|>
|
act/plotting/skewtdisplay.py
<|code_start|>"""
Stores the class for SkewTDisplay.
"""
import warnings
from copy import deepcopy
import matplotlib.pyplot as plt
# Import third party libraries
import metpy
import metpy.calc as mpcalc
import numpy as np
import scipy
from metpy.plots import Hodograph, SkewT
from metpy.units import units
from ..retrievals import calculate_stability_indicies
# Import Local Libs
from ..utils import datetime_utils as dt_utils
from .plot import Display
class SkewTDisplay(Display):
"""
A class for making Skew-T plots.
This is inherited from the :func:`act.plotting.Display`
class and has therefore has the same attributes as that class.
See :func:`act.plotting.Display`
for more information. There are no additional attributes or parameters
to this class.
In order to create Skew-T plots, ACT needs the MetPy package to be
installed on your system. More information about
MetPy go here: https://unidata.github.io/MetPy/latest/index.html.
Examples
--------
Here is an example of how to make a Skew-T plot using ACT:
.. code-block :: python
sonde_ds = act.io.arm.read_arm_netcdf(
act.tests.sample_files.EXAMPLE_SONDE1)
skewt = act.plotting.SkewTDisplay(sonde_ds)
skewt.plot_from_u_and_v('u_wind', 'v_wind', 'pres', 'tdry', 'dp')
plt.show()
"""
def __init__(self, ds, subplot_shape=(1,), subplot=None, ds_name=None, set_fig=None, **kwargs):
# We want to use our routine to handle subplot adding, not the main
# one
new_kwargs = kwargs.copy()
super().__init__(ds, None, ds_name, subplot_kw=dict(projection='skewx'),
**new_kwargs)
# Make a SkewT object for each subplot
self.add_subplots(subplot_shape, set_fig=set_fig, subplot=subplot, **kwargs)
def add_subplots(self, subplot_shape=(1,), set_fig=None, subplot=None, **kwargs):
"""
Adds subplots to the Display object. The current
figure in the object will be deleted and overwritten.
Parameters
----------
subplot_shape : 1 or 2D tuple, list, or array
The structure of the subplots in (rows, cols).
subplot_kw : dict, optional
The kwargs to pass into fig.subplots.
set_fig : matplotlib figure, optional
Figure to pass to SkewT
**kwargs : keyword arguments
Any other keyword arguments that will be passed
into :func:`matplotlib.pyplot.figure` when the figure
is made. The figure is only made if the *fig*
property is None. See the matplotlib
documentation for further details on what keyword
arguments are available.
"""
del self.axes
if self.fig is None and set_fig is None:
self.fig = plt.figure(**kwargs)
if set_fig is not None:
self.fig = set_fig
self.SkewT = np.empty(shape=subplot_shape, dtype=SkewT)
self.axes = np.empty(shape=subplot_shape, dtype=plt.Axes)
if len(subplot_shape) == 1:
for i in range(subplot_shape[0]):
if subplot is None:
subplot_tuple = (subplot_shape[0], 1, i + 1)
else:
subplot_tuple = subplot
self.SkewT[i] = SkewT(fig=self.fig, subplot=subplot_tuple)
self.axes[i] = self.SkewT[i].ax
elif len(subplot_shape) == 2:
for i in range(subplot_shape[0]):
for j in range(subplot_shape[1]):
subplot_tuple = (
subplot_shape[0],
subplot_shape[1],
i * subplot_shape[1] + j + 1,
)
self.SkewT[i, j] = SkewT(fig=self.fig, subplot=subplot_tuple)
self.axes[i, j] = self.SkewT[i, j].ax
else:
raise ValueError('Subplot shape must be 1 or 2D!')
def set_xrng(self, xrng, subplot_index=(0,)):
"""
Sets the x range of the plot.
Parameters
----------
xrng : 2 number array.
The x limits of the plot.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
"""
if self.axes is None:
raise RuntimeError('set_xrng requires the plot to be displayed.')
if not hasattr(self, 'xrng') or np.all(self.xrng == 0):
if len(self.axes.shape) == 2:
self.xrng = np.zeros((self.axes.shape[0], self.axes.shape[1], 2))
else:
self.xrng = np.zeros((self.axes.shape[0], 2))
self.axes[subplot_index].set_xlim(xrng)
self.xrng[subplot_index, :] = np.array(xrng)
def set_yrng(self, yrng, subplot_index=(0,)):
"""
Sets the y range of the plot.
Parameters
----------
yrng : 2 number array
The y limits of the plot.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
"""
if self.axes is None:
raise RuntimeError('set_yrng requires the plot to be displayed.')
if not hasattr(self, 'yrng') or np.all(self.yrng == 0):
if len(self.axes.shape) == 2:
self.yrng = np.zeros((self.axes.shape[0], self.axes.shape[1], 2))
else:
self.yrng = np.zeros((self.axes.shape[0], 2))
if not hasattr(self, 'yrng') and len(self.axes.shape) == 2:
self.yrng = np.zeros((self.axes.shape[0], self.axes.shape[1], 2))
elif not hasattr(self, 'yrng') and len(self.axes.shape) == 1:
self.yrng = np.zeros((self.axes.shape[0], 2))
if yrng[0] == yrng[1]:
yrng[1] = yrng[1] + 1
self.axes[subplot_index].set_ylim(yrng)
self.yrng[subplot_index, :] = yrng
def plot_from_spd_and_dir(
self, spd_field, dir_field, p_field, t_field, td_field, dsname=None, **kwargs
):
"""
This plot will make a sounding plot from wind data that is given
in speed and direction.
Parameters
----------
spd_field : str
The name of the field corresponding to the wind speed.
dir_field : str
The name of the field corresponding to the wind direction
in degrees from North.
p_field : str
The name of the field containing the atmospheric pressure.
t_field : str
The name of the field containing the atmospheric temperature.
td_field : str
The name of the field containing the dewpoint.
dsname : str or None
The name of the datastream to plot. Set to None to make ACT
attempt to automatically determine this.
kwargs : dict
Additional keyword arguments will be passed into
:func:`act.plotting.SkewTDisplay.plot_from_u_and_v`
Returns
-------
ax : matplotlib axis handle
The matplotlib axis handle corresponding to the plot.
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
# Make temporary field called tempu, tempv
spd = self._ds[dsname][spd_field].values * units(self._ds[dsname][spd_field].attrs['units'])
dir = self._ds[dsname][dir_field].values * units(self._ds[dsname][dir_field].attrs['units'])
tempu, tempv = mpcalc.wind_components(spd, dir)
self._ds[dsname]['temp_u'] = deepcopy(self._ds[dsname][spd_field])
self._ds[dsname]['temp_v'] = deepcopy(self._ds[dsname][spd_field])
self._ds[dsname]['temp_u'].values = tempu
self._ds[dsname]['temp_v'].values = tempv
the_ax = self.plot_from_u_and_v(
'temp_u', 'temp_v', p_field, t_field, td_field, dsname, **kwargs
)
del self._ds[dsname]['temp_u'], self._ds[dsname]['temp_v']
return the_ax
def plot_from_u_and_v(
self,
u_field,
v_field,
p_field,
t_field,
td_field,
dsname=None,
subplot_index=(0,),
p_levels_to_plot=None,
show_parcel=True,
shade_cape=True,
shade_cin=True,
set_title=None,
smooth_p=3,
plot_dry_adiabats=False,
plot_moist_adiabats=False,
plot_mixing_lines=False,
plot_barbs_kwargs=dict(),
plot_kwargs=dict(),
dry_adiabats_kwargs=dict(),
moist_adiabats_kwargs=dict(),
mixing_lines_kwargs=dict(),
):
"""
This function will plot a Skew-T from a sounding dataset. The wind
data must be given in u and v.
Parameters
----------
u_field : str
The name of the field containing the u component of the wind.
v_field : str
The name of the field containing the v component of the wind.
p_field : str
The name of the field containing the pressure.
t_field : str
The name of the field containing the temperature.
td_field : str
The name of the field containing the dewpoint temperature.
dsname : str or None
The name of the datastream to plot. Set to None to make ACT
attempt to automatically determine this.
subplot_index : tuple
The index of the subplot to make the plot on.
p_levels_to_plot : 1D array
The pressure levels to plot the wind barbs on. Set to None
to have ACT use neatly spaced defaults of
25, 50, 75, 100, 150, 200, 250, 300, 400, 500, 600, 700, 750, 800,
850, 900, 950, and 1000 hPa.
show_parcel : bool
Set to true to calculate the profile a parcel takes through the atmosphere
using the metpy.calc.parcel_profile function. From their documentation,
the parcel starts at the surface temperature and dewpoint, is lifted up
dry adiabatically to the LCL and then moist adiabatically from there.
shade_cape : bool
Set to True to shade the CAPE red.
shade_cin : bool
Set to True to shade the CIN blue.
set_title : None or str
The title of the plot is set to this. Set to None to use
a default title.
smooth_p : int
If pressure is not in descending order, will smooth the data
using this many points to try and work around the issue.
Default is 3, but in the PBL retrieval code we have to default to 5 at times
plot_barbs_kwargs : dict
Additional keyword arguments to pass into MetPy's
SkewT.plot_barbs.
plot_kwargs : dict
Additional keyword arguments to pass into MetPy's
SkewT.plot.
dry_adiabats_kwargs : dict
Additional keyword arguments to pass into MetPy's plot_dry_adiabats function
moist_adiabats_kwargs : dict
Additional keyword arguments to pass into MetPy's plot_moist_adiabats function
mixing_lines_kwargs : dict
Additional keyword arguments to pass into MetPy's plot_mixing_lines function
Returns
-------
ax : matplotlib axis handle
The axis handle to the plot.
References
----------
May, R. M., Arms, S. C., Marsh, P., Bruning, E., Leeman, J. R., Goebbert, K., Thielen, J. E.,
Bruick, Z., and Camron, M. D., 2023: MetPy: A Python Package for Meteorological Data.
Unidata, Unidata/MetPy, doi:10.5065/D6WW7G29.
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
if p_levels_to_plot is None:
p_levels_to_plot = np.array(
[
25.0,
50.0,
75.0,
100.0,
150.0,
200.0,
250.0,
300.0,
400.0,
500.0,
600.0,
700.0,
750.0,
800.0,
850.0,
900.0,
950.0,
1000.0,
]
) * units('hPa')
# Get pressure and smooth if not in order
p = self._ds[dsname][p_field]
if not all(p[i] <= p[i + 1] for i in range(len(p) - 1)):
if 'time' in self._ds:
self._ds[dsname][p_field] = (
self._ds[dsname][p_field].rolling(time=smooth_p, min_periods=1, center=True).mean()
)
p = self._ds[dsname][p_field]
p_units = self._ds[dsname][p_field].attrs['units']
p = p.values * getattr(units, p_units)
if len(np.shape(p)) == 2:
p = np.reshape(p, p.shape[0] * p.shape[1])
T = self._ds[dsname][t_field]
T_units = self._ds[dsname][t_field].attrs['units']
if T_units == 'C':
T_units = 'degC'
T = T.values * getattr(units, T_units)
if len(np.shape(T)) == 2:
T = np.reshape(T, T.shape[0] * T.shape[1])
Td = self._ds[dsname][td_field]
Td_units = self._ds[dsname][td_field].attrs['units']
if Td_units == 'C':
Td_units = 'degC'
Td = Td.values * getattr(units, Td_units)
if len(np.shape(Td)) == 2:
Td = np.reshape(Td, Td.shape[0] * Td.shape[1])
u = self._ds[dsname][u_field]
u_units = self._ds[dsname][u_field].attrs['units']
u = u.values * getattr(units, u_units)
if len(np.shape(u)) == 2:
u = np.reshape(u, u.shape[0] * u.shape[1])
v = self._ds[dsname][v_field]
v_units = self._ds[dsname][v_field].attrs['units']
v = v.values * getattr(units, v_units)
if len(np.shape(v)) == 2:
v = np.reshape(v, v.shape[0] * v.shape[1])
u_red = np.zeros_like(p_levels_to_plot) * getattr(units, u_units)
v_red = np.zeros_like(p_levels_to_plot) * getattr(units, v_units)
# Check p_levels_to_plot units, and convert to p units if needed
if not hasattr(p_levels_to_plot, 'units'):
p_levels_to_plot = p_levels_to_plot * getattr(units, p_units)
else:
p_levels_to_plot = p_levels_to_plot.to(p_units)
for i in range(len(p_levels_to_plot)):
index = np.argmin(np.abs(p_levels_to_plot[i] - p))
u_red[i] = u[index].magnitude * getattr(units, u_units)
v_red[i] = v[index].magnitude * getattr(units, v_units)
self.SkewT[subplot_index].plot(p, T, 'r', **plot_kwargs)
self.SkewT[subplot_index].plot(p, Td, 'g', **plot_kwargs)
self.SkewT[subplot_index].plot_barbs(
p_levels_to_plot.magnitude, u_red, v_red, **plot_barbs_kwargs
)
# Metpy fix if Pressure does not decrease monotonically in
# your sounding.
try:
prof = mpcalc.parcel_profile(p, T[0], Td[0]).to('degC')
except metpy.calc.exceptions.InvalidSoundingError:
p = scipy.ndimage.median_filter(p, 3, output=float)
p = metpy.units.units.Quantity(p, p_units)
prof = mpcalc.parcel_profile(p, T[0], Td[0]).to('degC')
if show_parcel:
# Only plot where prof > T
lcl_pressure, lcl_temperature = mpcalc.lcl(p[0], T[0], Td[0])
self.SkewT[subplot_index].plot(
lcl_pressure, lcl_temperature, 'ko', markerfacecolor='black', **plot_kwargs
)
self.SkewT[subplot_index].plot(p, prof, 'k', linewidth=2, **plot_kwargs)
if shade_cape:
self.SkewT[subplot_index].shade_cape(p, T, prof, linewidth=2)
if shade_cin:
self.SkewT[subplot_index].shade_cin(p, T, prof, linewidth=2)
# Get plot temperatures from x-axis as t0
t0 = self.SkewT[subplot_index].ax.get_xticks() * getattr(units, T_units)
# Add minimum pressure to pressure levels to plot
if np.nanmin(p.magnitude) < np.nanmin(p_levels_to_plot.magnitude):
plp = np.insert(p_levels_to_plot.magnitude, 0, np.nanmin(p.magnitude)) * units('hPa')
else:
plp = p_levels_to_plot
# New options for plotting dry and moist adiabats as well as the mixing lines
if plot_dry_adiabats:
self.SkewT[subplot_index].plot_dry_adiabats(pressure=plp, t0=t0, **dry_adiabats_kwargs)
if plot_moist_adiabats:
self.SkewT[subplot_index].plot_moist_adiabats(t0=t0, pressure=plp, **moist_adiabats_kwargs)
if plot_mixing_lines:
self.SkewT[subplot_index].plot_mixing_lines(pressure=plp, **mixing_lines_kwargs)
# Set Title
if set_title is None:
if 'time' in self._ds[dsname]:
title_time = dt_utils.numpy_to_arm_date(self._ds[dsname].time.values[0]),
elif '_file_dates' in self._ds[dsname].attrs:
title_time = self._ds[dsname].attrs['_file_dates'][0]
else:
title_time = ''
set_title = ' '.join([dsname, 'on', title_time[0]])
self.axes[subplot_index].set_title(set_title)
# Set Y Limit
our_data = p.magnitude
if np.isfinite(our_data).any():
yrng = [np.nanmax(our_data), np.nanmin(our_data)]
else:
yrng = [1000.0, 100.0]
self.set_yrng(yrng, subplot_index)
# Set X Limit
xrng = [np.nanmin(T.magnitude) - 10.0, np.nanmax(T.magnitude) + 10.0]
self.set_xrng(xrng, subplot_index)
return self.axes[subplot_index]
def plot_hodograph(
self,
spd_field,
dir_field,
color_field=None,
set_fig=None,
set_axes=None,
component_range=80,
dsname=None,
uv_flag=False,
):
"""
This will plot a hodograph from the radiosonde wind data using
MetPy
Parameters
----------
spd_field : str
The name of the field corresponding to the wind speed.
dir_field : str
The name of the field corresponding to the wind direction
in degrees from North.
color_field : str, optional
The name of the field if wanting to shade by another variable
set_fig : matplotlib figure, optional
The figure to plot on
set_axes : matplotlib axes, optional
The specific axes to plot on
component_range : int
Range of the hodograph. Default is 80
dsname : str
Name of the datastream to plot if multiple in the plot object
uv_flag : boolean
If set to True, spd_field and dir_field will be treated as the
U and V wind variable names
Returns
-------
self.axes : matplotlib axes
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
# Get the current plotting axis
if set_fig is not None:
self.fig = set_fig
if set_axes is not None:
self.axes = set_axes
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
# Calculate u/v wind components from speed/direction
if uv_flag is False:
spd = self._ds[dsname][spd_field].values * units(
self._ds[dsname][spd_field].attrs['units']
)
dir = self._ds[dsname][dir_field].values * units(
self._ds[dsname][dir_field].attrs['units']
)
u, v = mpcalc.wind_components(spd, dir)
else:
u = self._ds[dsname][spd_field].values * units(
self._ds[dsname][spd_field].attrs['units']
)
v = self._ds[dsname][dir_field].values * units(
self._ds[dsname][dir_field].attrs['units']
)
# Plot out the data using the Hodograph method
h = Hodograph(self.axes, component_range=component_range)
h.add_grid(increment=20)
if color_field is None:
h.plot(u, v)
else:
data = self._ds[dsname][color_field].values * units(
self._ds[dsname][color_field].attrs['units']
)
h.plot_colormapped(u, v, data)
return self.axes
def add_stability_info(
self,
temp_name='tdry',
td_name='dp',
p_name='pres',
overwrite_data=None,
add_data=None,
set_fig=None,
set_axes=None,
dsname=None,
):
"""
This will add stability indices text to the plot, either calculated
with MetPy or supplied through the overwrite_data and add_data keywords.
Parameters
----------
temp_name : str
The name of the temperature field.
td_name : str
The name of the dewpoint field.
p_name : str
The name of the pressure field.
overwrite_data : dict
A dictionary of variables/values to write out instead
of the ones calculated by MetPy. Needs to be of the form
.. code-block:: python
overwrite_data={'LCL': 234, 'CAPE': 25}
...
add_data : dict
A dictionary of variables and values to write out in
addition to the MetPy calculated ones
set_fig : matplotlib figure, optional
The figure to plot on
set_axes : matplotlib axes, optional
The specific axes to plot on
dsname : str
Name of the datastream to plot if multiple in the plot object
Returns
-------
self.axes : matplotlib axes
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
# Get the current plotting axis
if set_fig is not None:
self.fig = set_fig
if set_axes is not None:
self.axes = set_axes
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
self.axes.spines['top'].set_visible(False)
self.axes.spines['right'].set_visible(False)
self.axes.spines['bottom'].set_visible(False)
self.axes.spines['left'].set_visible(False)
self.axes.get_xaxis().set_ticks([])
self.axes.get_yaxis().set_ticks([])
ct = 0
if overwrite_data is None:
# Calculate stability indicies
ds_sonde = calculate_stability_indicies(
self._ds[dsname],
temp_name=temp_name,
td_name=td_name,
p_name=p_name,
)
# Add MetPy calculated variables to the list
variables = {
'lifted_index': 'Lifted Index',
'surface_based_cape': 'SBCAPE',
'surface_based_cin': 'SBCIN',
'most_unstable_cape': 'MUCAPE',
'most_unstable_cin': 'MUCIN',
'lifted_condensation_level_temperature': 'LCL Temp',
'lifted_condensation_level_pressure': 'LCL Pres',
}
for i, v in enumerate(variables):
var_string = str(np.round(ds_sonde[v].values, 2))
self.axes.text(
-0.05,
(0.98 - (0.1 * i)),
variables[v] + ': ',
transform=self.axes.transAxes,
fontsize=10,
verticalalignment='top',
)
self.axes.text(
0.95,
(0.98 - (0.1 * i)),
var_string,
transform=self.axes.transAxes,
fontsize=10,
verticalalignment='top',
horizontalalignment='right',
)
ct += 1
else:
# If overwrite_data is set, the user passes in their own dictionary
for i, v in enumerate(overwrite_data):
var_string = str(np.round(overwrite_data[v], 2))
self.axes.text(
-0.05,
(0.98 - (0.1 * i)),
v + ': ',
transform=self.axes.transAxes,
fontsize=10,
verticalalignment='top',
)
self.axes.text(
0.95,
(0.98 - (0.1 * i)),
var_string,
transform=self.axes.transAxes,
fontsize=10,
verticalalignment='top',
horizontalalignment='right',
)
# User can also add variables to the existing ones calculated by MetPy
if add_data is not None:
for i, v in enumerate(add_data):
var_string = str(np.round(add_data[v], 2))
self.axes.text(
-0.05,
(0.98 - (0.1 * (i + ct))),
v + ': ',
transform=self.axes.transAxes,
fontsize=10,
verticalalignment='top',
)
self.axes.text(
0.95,
(0.98 - (0.1 * (i + ct))),
var_string,
transform=self.axes.transAxes,
fontsize=10,
verticalalignment='top',
horizontalalignment='right',
)
return self.axes
def plot_enhanced_skewt(
self,
spd_name='wspd',
dir_name='deg',
temp_name='tdry',
td_name='dp',
p_name='pres',
overwrite_data=None,
add_data=None,
color_field=None,
component_range=80,
uv_flag=False,
dsname=None,
figsize=(14, 10),
layout='constrained',
):
"""
This will plot an enhanced Skew-T plot with a Hodograph on the top right
and the stability parameters on the lower right. This will create a new
figure so that one does not need to be defined through subplot_shape.
Requires Matplotlib v 3.7 and higher
Parameters
----------
spd_name : str
The name of the field corresponding to the wind speed.
dir_name : str
The name of the field corresponding to the wind direction
in degrees from North.
temp_name : str
The name of the temperature field.
td_name : str
The name of the dewpoint field.
p_name : str
The name of the pressure field.
overwrite_data : dict
A dictionary of variables/values to write out instead
of the ones calculated by MetPy. Needs to be of the form
.. code-block:: python
overwrite_data={'LCL': 234, 'CAPE': 25}
...
add_data : dict
A dictionary of variables and values to write out in
addition to the MetPy calculated ones
color_field : str, optional
The name of the field if wanting to shade by another variable
component_range : int
Range of the hodograph. Default is 80
uv_flag : boolean
If set to True, spd_field and dir_field will be treated as the
U and V wind variable names
dsname : str
Name of the datastream to plot if multiple in the plot object
figsize : tuple
Figure size for the plot
layout : str
String to pass to matplotlib.figure.Figure object layout keyword
argument. Choice of 'constrained,' 'compressed,' 'tight,' or None.
Default is 'constrained'.
Returns
-------
self.axes : matplotlib axes
"""
# Set up the figure and axes
# Close existing figure as a new one will be created
plt.close('all')
subplot_kw = {'a': {'projection': 'skewx'}}
fig, axs = plt.subplot_mosaic(
[['a', 'a', 'b'], ['a', 'a', 'b'], ['a', 'a', 'c'], ['a', 'a', 'c']],
layout=layout,
per_subplot_kw=subplot_kw,
)
self.fig = fig
self.axes = axs
# Plot out the Skew-T
display = SkewTDisplay(self._ds, set_fig=fig, subplot=axs['a'], figsize=figsize)
if uv_flag is True:
display.plot_from_u_and_v(spd_name, dir_name, p_name, temp_name, td_name)
else:
display.plot_from_spd_and_dir(spd_name, dir_name, p_name, temp_name, td_name)
# Plot the hodograph
display.plot_hodograph(
spd_name,
dir_name,
set_axes=axs['b'],
color_field=color_field,
component_range=component_range,
dsname=dsname,
uv_flag=uv_flag,
)
# Add Stability information
display.add_stability_info(
set_axes=axs['c'],
temp_name=temp_name,
td_name=td_name,
p_name=p_name,
overwrite_data=overwrite_data,
add_data=add_data,
dsname=dsname,
)
return self.axes
<|code_end|>
examples/plotting/plot_skewt.py
<|code_start|>"""
Skew-T plot of a sounding
-------------------------
This example shows how to make a Skew-T plot from a sounding
and calculate stability indicies.
"""
from arm_test_data import DATASETS
import metpy
import xarray as xr
from matplotlib import pyplot as plt
import act
# Make sure attributes are retained
xr.set_options(keep_attrs=True)
# Read data
filename_sonde = DATASETS.fetch('sgpsondewnpnC1.b1.20190101.053200.cdf')
sonde_ds = act.io.arm.read_arm_netcdf(filename_sonde)
print(list(sonde_ds))
# Calculate stability indicies
sonde_ds = act.retrievals.calculate_stability_indicies(
sonde_ds, temp_name='tdry', td_name='dp', p_name='pres'
)
print(sonde_ds['lifted_index'])
# Set up plot
skewt = act.plotting.SkewTDisplay(sonde_ds, figsize=(15, 10))
# Add data
skewt.plot_from_u_and_v('u_wind', 'v_wind', 'pres', 'tdry', 'dp')
plt.show()
# One could also add options like adiabats and mixing lines
skewt = act.plotting.SkewTDisplay(sonde_ds, figsize=(15, 10))
skewt.plot_from_u_and_v('u_wind', 'v_wind', 'pres', 'tdry', 'dp', plot_dry_adiabats=True,
plot_moist_adiabats=True, plot_mixing_lines=True)
plt.show()
sonde_ds.close()
<|code_end|>
|
Function to create movies
The DQ Office creates many movies from static plots, and another researcher is working on a project that would benefit from a simple way to create movies from plots. We should create a function that makes a movie from an ordered list of images. A mostly-Python approach would be best.
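A rough sketch of the kind of helper this could be (the name, signature, and the moviepy dependency below are assumptions, not a settled design):

```python
from pathlib import Path

# moviepy is assumed here; imageio + ffmpeg would be a reasonable alternative
# if we want to keep the dependency footprint small.
from moviepy.video.io.ImageSequenceClip import ImageSequenceClip


def make_movie(image_files, write_filename='movie.mp4', fps=10):
    """Stitch an ordered list of image file paths into a single movie."""
    clip = ImageSequenceClip([str(f) for f in image_files], fps=fps)
    clip.write_videofile(write_filename)
    return str(Path(write_filename).absolute())
```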
|
act/utils/__init__.py
<|code_start|>"""
This module contains the common procedures used by all modules of the ARM
Community Toolkit.
"""
import lazy_loader as lazy
__getattr__, __dir__, __all__ = lazy.attach(
__name__,
submodules=['data_utils', 'datetime_utils', 'geo_utils', 'inst_utils', 'io_utils', 'qc_utils', 'radiance_utils', 'ship_utils'],
submod_attrs={
'data_utils': [
'ChangeUnits',
'accumulate_precip',
'add_in_nan',
'assign_coordinates',
'convert_units',
'create_pyart_obj',
'get_missing_value',
'ts_weighted_average',
'height_adjusted_pressure',
'height_adjusted_temperature',
'convert_to_potential_temp',
'arm_site_location_search',
'DatastreamParserARM',
],
'datetime_utils': [
'dates_between',
'datetime64_to_datetime',
'determine_time_delta',
'numpy_to_arm_date',
'reduce_time_ranges',
'date_parser',
'adjust_timestamp'
],
'geo_utils': [
'add_solar_variable',
'destination_azimuth_distance',
'get_solar_azimuth_elevation',
'get_sunrise_sunset_noon',
'is_sun_visible',
],
'inst_utils': ['decode_present_weather'],
'qc_utils': ['calculate_dqr_times'],
'radiance_utils': ['planck_converter'],
'ship_utils': ['calc_cog_sog', 'proc_scog'],
'io_utils': ['pack_tar',
'unpack_tar',
'cleanup_files',
'is_gunzip_file',
'pack_gzip',
'unpack_gzip'
],
},
)
<|code_end|>
act/utils/io_utils.py
<|code_start|>from pathlib import Path
import tarfile
from os import PathLike
from shutil import rmtree
import random
import string
import gzip
import shutil
import tempfile
def pack_tar(filenames, write_filename=None, write_directory=None, remove=False):
"""
Creates TAR file from list of filenames provided. Currently only works with
all files existing in the same directory.
...
Parameters
----------
filenames : str or list
Filenames to be placed in TAR file
write_filename : str, pathlib.Path, None
TAR output filename. If not provided will use file name 'created_tarfile.tar'
write_directory : str, pathlib.Path, None
Path to directory to write TAR file. If the directory does not exist will
be created.
remove : boolean
Delete provided filenames after making TAR file
Returns
-------
str
Full path name of the created TAR file.
"""
if write_filename is None:
write_filename = 'created_tarfile.tar'
if isinstance(filenames, (str, PathLike)):
filenames = [filenames]
if write_directory is not None:
write_directory = Path(write_directory)
write_directory.mkdir(parents=True, exist_ok=True)
write_filename = Path(write_filename).name
elif Path(write_filename).parent != Path('.'):
write_directory = Path(write_filename).parent
else:
write_directory = Path('.')
if not str(write_filename).endswith('.tar'):
write_filename = str(write_filename) + '.tar'
write_filename = Path(write_directory, write_filename)
tar_file_handle = tarfile.open(write_filename, "w")
for filename in filenames:
tar_file_handle.add(filename, arcname=Path(filename).name)
tar_file_handle.close()
if remove:
for filename in filenames:
Path(filename).unlink()
return str(write_filename)
def unpack_tar(tar_files, write_directory=None, temp_dir=False, randomize=True,
return_files=True, remove=False):
"""
Unpacks TAR file contents into provided base directory
...
Parameters
----------
tar_files : str or list
path to TAR file to be unpacked
write_directory : str or pathlib.Path
base path to extract contents of TAR files or create a new randomized directory
to extract contents of TAR file.
temp_dir : boolean
Should a temporary directory be created and TAR files extracted to the new directory.
write_directory and randomize are ignored if this option is used.
randomize : boolean
Create a new randomized directory to extract TAR files into.
return_files : boolean
When set will return a list of full path filenames to the extracted files.
When set to False will return full path to directory containing extracted files.
remove : boolean
Delete provided TAR files after extracting files.
Returns
-------
files : list or str
List of full path files extracted from the TAR file or full path to directory
containing extracted files.
"""
files = []
if isinstance(tar_files, (str, PathLike)):
tar_files = [tar_files]
out_dir = Path.cwd()
if temp_dir is True:
out_dir = Path(tempfile.TemporaryDirectory().name)
else:
if write_directory is not None:
out_dir = Path(write_directory)
else:
out_dir = Path(Path(tar_files[0]).parent)
if out_dir.is_dir() is False:
out_dir.mkdir(parents=True, exist_ok=True)
if randomize:
out_dir = Path(tempfile.mkdtemp(dir=out_dir))
for tar_file in tar_files:
try:
tar = tarfile.open(tar_file)
tar.extractall(path=out_dir)
result = [str(Path(out_dir, ii.name)) for ii in tar.getmembers()]
files.extend(result)
tar.close()
except tarfile.ReadError:
print(f"\nCould not extract files from {tar_file}")
if return_files is False:
files = str(out_dir)
else:
files.sort()
if remove:
for tar_file in tar_files:
Path(tar_file).unlink()
return files
def cleanup_files(dirname=None, files=None):
"""
Cleans up files and directory possibly created from unpacking TAR files with unpack_tar()
...
Parameters
----------
dirname : str, pathlib.Path, None
Path to directory of extracted files which will be removed.
files : str, pathlib.Path, list, None
Full path file name(s) from extracted TAR file.
Assumes the directory this file exists in should be removed.
"""
if isinstance(files, (str, PathLike)):
files = [str(files)]
try:
if dirname is not None:
rmtree(dirname)
if files is not None and len(files) > 0 and Path(files[0]).is_file():
out_dir = Path(files[0]).parent
rmtree(out_dir)
except Exception as error:
print("\nError removing files:", error)
def is_gunzip_file(filepath):
"""
Function to test if file is a gunzip file.
Parameters
----------
filepath : str or pathlib.Path to file to test
Returns
-------
test : boolean
Result from testing if file is a gunzip file
"""
try:
with open(str(filepath), 'rb') as test_f:
return test_f.read(2) == b'\x1f\x8b'
except Exception:
return False
def pack_gzip(filename, write_directory=None, remove=False):
"""
Creates a gunzip file from a filename path
...
Parameters
----------
filename : str, pathlib.Path
Filename to use in creation of gunzip version.
write_directory : str, pathlib.Path, list, None
Path to directory to place newly created gunzip file.
remove : boolean
Remove provided filename after creating gunzip file
Returns
-------
write_filename : str
Full path name of created gunzip file
"""
write_filename = Path(filename).name + '.gz'
if write_directory is not None:
write_filename = Path(write_directory, write_filename)
Path(write_directory).mkdir(parents=True, exist_ok=True)
else:
write_filename = Path(Path(filename).parent, write_filename)
with open(filename, 'rb') as f_in:
with gzip.open(write_filename, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
if remove:
Path(filename).unlink()
return str(write_filename)
def unpack_gzip(filename, write_directory=None, remove=False):
"""
Extracts file from a gunzip file.
...
Parameters
----------
filename : str, pathlib.Path
Filename to use in extraction of gunzip file.
write_directory : str, pathlib.Path, list, None
Path to directory to place newly created gunzip file.
remove : boolean
Remove provided filename after creating gunzip file
Returns
-------
write_filename : str
Full path name of created gunzip file
"""
if write_directory is None:
write_directory = Path(filename).parent
write_filename = Path(filename).name
if write_filename.endswith('.gz'):
write_filename = write_filename.replace(".gz", "")
write_filename = Path(write_directory, write_filename)
with gzip.open(filename, "rb") as f_in:
with open(write_filename, "wb") as f_out:
shutil.copyfileobj(f_in, f_out)
if remove:
Path(filename).unlink()
return str(write_filename)
<|code_end|>
|
act/utils/__init__.py
<|code_start|>"""
This module contains the common procedures used by all modules of the ARM
Community Toolkit.
"""
import lazy_loader as lazy
__getattr__, __dir__, __all__ = lazy.attach(
__name__,
submodules=['data_utils', 'datetime_utils', 'geo_utils', 'inst_utils', 'io_utils', 'qc_utils', 'radiance_utils', 'ship_utils'],
submod_attrs={
'data_utils': [
'ChangeUnits',
'accumulate_precip',
'add_in_nan',
'assign_coordinates',
'convert_units',
'create_pyart_obj',
'get_missing_value',
'ts_weighted_average',
'height_adjusted_pressure',
'height_adjusted_temperature',
'convert_to_potential_temp',
'arm_site_location_search',
'DatastreamParserARM',
],
'datetime_utils': [
'dates_between',
'datetime64_to_datetime',
'determine_time_delta',
'numpy_to_arm_date',
'reduce_time_ranges',
'date_parser',
'adjust_timestamp'
],
'geo_utils': [
'add_solar_variable',
'destination_azimuth_distance',
'get_solar_azimuth_elevation',
'get_sunrise_sunset_noon',
'is_sun_visible',
],
'inst_utils': ['decode_present_weather'],
'qc_utils': ['calculate_dqr_times'],
'radiance_utils': ['planck_converter'],
'ship_utils': ['calc_cog_sog', 'proc_scog'],
'io_utils': ['pack_tar',
'unpack_tar',
'cleanup_files',
'is_gunzip_file',
'pack_gzip',
'unpack_gzip',
'generate_movie'
],
},
)
<|code_end|>
act/utils/io_utils.py
<|code_start|>from pathlib import Path
import tarfile
from os import PathLike
from shutil import rmtree
import random
import string
import gzip
import shutil
import tempfile
try:
import moviepy.video.io.ImageSequenceClip
MOVIEPY_AVAILABLE = True
except ImportError:
MOVIEPY_AVAILABLE = False
def pack_tar(filenames, write_filename=None, write_directory=None, remove=False):
"""
Creates TAR file from list of filenames provided. Currently only works with
all files existing in the same directory.
...
Parameters
----------
filenames : str or list
Filenames to be placed in TAR file
write_filename : str, pathlib.Path, None
TAR output filename. If not provided will use file name 'created_tarfile.tar'
write_directory : str, pathlib.Path, None
Path to directory to write TAR file. If the directory does not exist will
be created.
remove : boolean
Delete provided filenames after making TAR file
Returns
-------
str
Full path name of the created TAR file.
"""
if write_filename is None:
write_filename = 'created_tarfile.tar'
if isinstance(filenames, (str, PathLike)):
filenames = [filenames]
if write_directory is not None:
write_directory = Path(write_directory)
write_directory.mkdir(parents=True, exist_ok=True)
write_filename = Path(write_filename).name
elif Path(write_filename).parent != Path('.'):
write_directory = Path(write_filename).parent
else:
write_directory = Path('.')
if not str(write_filename).endswith('.tar'):
write_filename = str(write_filename) + '.tar'
write_filename = Path(write_directory, write_filename)
tar_file_handle = tarfile.open(write_filename, "w")
for filename in filenames:
tar_file_handle.add(filename, arcname=Path(filename).name)
tar_file_handle.close()
if remove:
for filename in filenames:
Path(filename).unlink()
return str(write_filename)
def unpack_tar(tar_files, write_directory=None, temp_dir=False, randomize=True,
return_files=True, remove=False):
"""
Unpacks TAR file contents into provided base directory
...
Parameters
----------
tar_files : str or list
path to TAR file to be unpacked
write_directory : str or pathlib.Path
base path to extract contents of TAR files or create a new randomized directory
to extract contents of TAR file.
temp_dir : boolean
Should a temporary directory be created and TAR files extracted to the new directory.
write_directory and randomize are ignored if this option is used.
randomize : boolean
Create a new randomized directory to extract TAR files into.
return_files : boolean
When set will return a list of full path filenames to the extracted files.
When set to False will return full path to directory containing extracted files.
remove : boolean
Delete provided TAR files after extracting files.
Returns
-------
files : list or str
List of full path files extracted from the TAR file or full path to directory
containing extracted files.
"""
files = []
if isinstance(tar_files, (str, PathLike)):
tar_files = [tar_files]
out_dir = Path.cwd()
if temp_dir is True:
out_dir = Path(tempfile.TemporaryDirectory().name)
else:
if write_directory is not None:
out_dir = Path(write_directory)
else:
out_dir = Path(Path(tar_files[0]).parent)
if out_dir.is_dir() is False:
out_dir.mkdir(parents=True, exist_ok=True)
if randomize:
out_dir = Path(tempfile.mkdtemp(dir=out_dir))
for tar_file in tar_files:
try:
tar = tarfile.open(tar_file)
tar.extractall(path=out_dir)
result = [str(Path(out_dir, ii.name)) for ii in tar.getmembers()]
files.extend(result)
tar.close()
except tarfile.ReadError:
print(f"\nCould not extract files from {tar_file}")
if return_files is False:
files = str(out_dir)
else:
files.sort()
if remove:
for tar_file in tar_files:
Path(tar_file).unlink()
return files
def cleanup_files(dirname=None, files=None):
"""
Cleans up files and directory possibly created from unpacking TAR files with unpack_tar()
...
Parameters
----------
dirname : str, pathlib.Path, None
Path to directory of extracted files which will be removed.
files : str, pathlib.Path, list, None
Full path file name(s) from extracted TAR file.
Assumes the directory this file exists in should be removed.
"""
if isinstance(files, (str, PathLike)):
files = [str(files)]
try:
if dirname is not None:
rmtree(dirname)
if files is not None and len(files) > 0 and Path(files[0]).is_file():
out_dir = Path(files[0]).parent
rmtree(out_dir)
except Exception as error:
print("\nError removing files:", error)
def is_gunzip_file(filepath):
"""
Function to test if file is a gunzip file.
Parameters
----------
filepath : str or pathlib.Path to file to test
Returns
-------
test : boolean
Result from testing if file is a gunzip file
"""
try:
with open(str(filepath), 'rb') as test_f:
return test_f.read(2) == b'\x1f\x8b'
except Exception:
return False
def pack_gzip(filename, write_directory=None, remove=False):
"""
Creates a gunzip file from a filename path
...
Parameters
----------
filename : str, pathlib.Path
Filename to use in creation of gunzip version.
write_directory : str, pathlib.Path, list, None
Path to directory to place newly created gunzip file.
remove : boolean
Remove provided filename after creating gunzip file
Returns
-------
write_filename : str
Full path name of created gunzip file
"""
write_filename = Path(filename).name + '.gz'
if write_directory is not None:
write_filename = Path(write_directory, write_filename)
Path(write_directory).mkdir(parents=True, exist_ok=True)
else:
write_filename = Path(Path(filename).parent, write_filename)
with open(filename, 'rb') as f_in:
with gzip.open(write_filename, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
if remove:
Path(filename).unlink()
return str(write_filename)
def unpack_gzip(filename, write_directory=None, remove=False):
"""
Extracts file from a gunzip file.
...
Parameters
----------
filename : str, pathlib.Path
Filename to use in extraction of gunzip file.
write_directory : str, pathlib.Path, list, None
Path to directory to place newly created gunzip file.
remove : boolean
Remove provided filename after creating gunzip file
Returns
-------
write_filename : str
Full path name of created gunzip file
"""
if write_directory is None:
write_directory = Path(filename).parent
write_filename = Path(filename).name
if write_filename.endswith('.gz'):
write_filename = write_filename.replace(".gz", "")
write_filename = Path(write_directory, write_filename)
with gzip.open(filename, "rb") as f_in:
with open(write_filename, "wb") as f_out:
shutil.copyfileobj(f_in, f_out)
if remove:
Path(filename).unlink()
return str(write_filename)
def generate_movie(images, write_directory=None, write_filename=None, fps=10, codec=None, threads=None):
"""
Creates a movie from a list of images
...
Parameters
----------
images : list
List of images in the correct order to make into a movie
write_directory : str, pathlib.Path, None
Path to directory to place the newly created movie file.
write_filename : str, pathlib.Path, None
Movie output filename
fps : int
Frames per second
codec : str
Codec to use for image encoding
threads : int
Number of threads to use for ffmpeg
Returns
-------
write_filename : str
Full path name of the created movie file
"""
if not MOVIEPY_AVAILABLE:
raise ImportError(
'MoviePy needs to be installed on your system to make movies.'
)
if write_filename is None:
write_filename = 'movie.mp4'
if write_directory is not None:
write_directory = Path(write_directory)
write_directory.mkdir(parents=True, exist_ok=True)
write_filename = Path(write_filename).name
elif Path(write_filename).parent != Path('.'):
write_directory = Path(write_filename).parent
else:
write_directory = Path('.')
full_path = write_directory / write_filename
clip = moviepy.video.io.ImageSequenceClip.ImageSequenceClip(images, fps=fps)
clip.write_videofile(str(full_path), codec=codec, threads=threads)
return full_path
<|code_end|>
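A minimal round-trip sketch for the gzip helpers above; the import path is an assumption (the module header is not shown here) and the filename is a placeholder.

```python
from pathlib import Path
# Assumed import location; adjust to wherever pack_gzip/unpack_gzip actually live.
from act.utils import pack_gzip, unpack_gzip, is_gunzip_file

# Create a small placeholder file to compress.
Path('example.txt').write_text('hello\n')

# Compress in place, removing the original file.
gz_file = pack_gzip('example.txt', remove=True)   # -> 'example.txt.gz'
print(is_gunzip_file(gz_file))                    # True

# Extract back into the same directory and remove the .gz file.
restored = unpack_gzip(gz_file, remove=True)      # -> 'example.txt'
print(Path(restored).read_text())
```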
|
Add Hysplit data support
As first documented in #702, there was interest at the 2023 PI meeting in the ability to read in and work with HYSPLIT data. We discussed this again on the ACT call and there was interest in this as well from others. It is proposed that we create a simple reader that could read in a file or a URL from the web-based HYSPLIT program and convert it to an xarray dataset. The existing PySPLIT repo does not look to be actively maintained.
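A rough sketch of the proposed interface; the function name `read_hysplit` and the trajectory filename below are illustrative only, not something settled in the issue.

```python
import act

# Hypothetical call to the proposed reader; the filename is a placeholder.
ds = act.io.read_hysplit('houston_backtrajectory.txt')
print(ds['lat'].values, ds['lon'].values, ds['alt'].values)
```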
|
act/io/__init__.py
<|code_start|>"""
This module contains procedures for reading and writing various ARM datasets.
"""
import lazy_loader as lazy
__getattr__, __dir__, __all__ = lazy.attach(
__name__,
submodules=['arm', 'text', 'icartt', 'mpl', 'neon', 'noaagml', 'noaapsl', 'pysp2'],
submod_attrs={
'arm': [
'WriteDataset',
'check_arm_standards',
'create_ds_from_arm_dod',
'read_arm_netcdf',
'check_if_tar_gz_file',
'read_arm_mmcr',
],
'text': ['read_csv'],
'icartt': ['read_icartt'],
'mpl': ['proc_sigma_mplv5_read', 'read_sigma_mplv5'],
'neon': ['read_neon_csv'],
'noaagml': [
'read_gml',
'read_gml_co2',
'read_gml_halo',
'read_gml_met',
'read_gml_ozone',
'read_gml_radiation',
'read_surfrad',
],
'noaapsl': [
'read_psl_wind_profiler',
'read_psl_wind_profiler_temperature',
'read_psl_parsivel',
'read_psl_radar_fmcw_moment',
'read_psl_surface_met',
],
'pysp2': ['read_hk_file', 'read_sp2', 'read_sp2_dat'],
'sodar': ['read_mfas_sodar'],
},
)
<|code_end|>
act/io/hysplit.py
<|code_start|><|code_end|>
examples/io/plot_hysplit.py
<|code_start|><|code_end|>
|
act/io/__init__.py
<|code_start|>"""
This module contains procedures for reading and writing various ARM datasets.
"""
import lazy_loader as lazy
__getattr__, __dir__, __all__ = lazy.attach(
__name__,
submodules=['arm', 'text', 'icartt', 'mpl', 'neon', 'noaagml', 'noaapsl', 'pysp2', 'hysplit'],
submod_attrs={
'arm': [
'WriteDataset',
'check_arm_standards',
'create_ds_from_arm_dod',
'read_arm_netcdf',
'check_if_tar_gz_file',
'read_arm_mmcr',
],
'text': ['read_csv'],
'icartt': ['read_icartt'],
'mpl': ['proc_sigma_mplv5_read', 'read_sigma_mplv5'],
'neon': ['read_neon_csv'],
'noaagml': [
'read_gml',
'read_gml_co2',
'read_gml_halo',
'read_gml_met',
'read_gml_ozone',
'read_gml_radiation',
'read_surfrad',
],
'noaapsl': [
'read_psl_wind_profiler',
'read_psl_wind_profiler_temperature',
'read_psl_parsivel',
'read_psl_radar_fmcw_moment',
'read_psl_surface_met',
],
'pysp2': ['read_hk_file', 'read_sp2', 'read_sp2_dat'],
'sodar': ['read_mfas_sodar'],
'hysplit': ['read_hysplit']
},
)
<|code_end|>
act/io/hysplit.py
<|code_start|>import os
import xarray as xr
import numpy as np
import pandas as pd
from datetime import datetime
from .text import read_csv
def read_hysplit(filename, base_year=2000):
"""
Reads an input HYSPLIT trajectory for plotting in ACT.
Parameters
----------
filename: str
The input file name.
base_year: int
The first year of the century in which the data are contained.
Returns
-------
ds: xarray Dataset
The ACT dataset containing the HYSPLIT trajectories
"""
ds = xr.Dataset({})
num_lines = 0
with open(filename, 'r') as filebuf:
num_grids = int(filebuf.readline().split()[0])
num_lines += 1
grid_times = []
grid_names = []
forecast_hours = np.zeros(num_grids)
for i in range(num_grids):
data = filebuf.readline().split()
num_lines += 1
grid_names.append(data[0])
grid_times.append(
datetime(year=int(data[1]), month=int(data[2]), day=int(data[3]), hour=int(data[4])))
forecast_hours[i] = int(data[5])
ds["grid_forecast_hour"] = xr.DataArray(forecast_hours, dims=["num_grids"])
ds["grid_forecast_hour"].attrs["standard_name"] = "Grid forecast hour"
ds["grid_forecast_hour"].attrs["units"] = "Hour [UTC]"
ds["grid_times"] = xr.DataArray(np.array(grid_times), dims=["num_grids"])
data_line = filebuf.readline().split()
num_lines += 1
ds.attrs["trajectory_direction"] = data_line[1]
ds.attrs["vertical_motion_calculation_method"] = data_line[2]
num_traj = int(data_line[0])
traj_times = []
start_lats = np.zeros(num_traj)
start_lons = np.zeros(num_traj)
start_alt = np.zeros(num_traj)
for i in range(num_traj):
data = filebuf.readline().split()
num_lines += 1
traj_times.append(
datetime(year=(base_year + int(data[0])), month=int(data[1]),
day=int(data[2]), hour=int(data[3])))
start_lats[i] = float(data[4])
start_lons[i] = float(data[5])
start_alt[i] = float(data[6])
ds["start_latitude"] = xr.DataArray(start_lats, dims=["num_trajectories"])
ds["start_latitude"].attrs["long_name"] = "Trajectory start latitude"
ds["start_latitude"].attrs["units"] = "degree"
ds["start_longitude"] = xr.DataArray(start_lats, dims=["num_trajectories"])
ds["start_longitude"].attrs["long_name"] = "Trajectory start longitude"
ds["start_longitude"].attrs["units"] = "degree"
ds["start_altitude"] = xr.DataArray(start_alt, dims=["num_trajectories"])
ds["start_altitude"].attrs["long_name"] = "Trajectory start altitude"
ds["start_altitude"].attrs["units"] = "degree"
data = filebuf.readline().split()
num_lines += 1
var_list = ["trajectory_number", "grid_number", "year", "month", "day",
"hour", "minute", "forecast_hour", "age", "lat", "lon", "alt"]
for variable in data[1:]:
var_list.append(variable)
input_df = pd.read_csv(
filename, sep='\s+', index_col=False, names=var_list, skiprows=12)
input_df['year'] = base_year + input_df['year']
input_df['time'] = pd.to_datetime(input_df[["year", "month", "day", "hour", "minute"]],
format='%y%m%d%H%M')
input_df = input_df.set_index("time")
del input_df["year"]
del input_df["month"]
del input_df["day"]
del input_df["hour"]
del input_df["minute"]
ds = ds.merge(input_df.to_xarray())
ds.attrs['datastream'] = 'hysplit'
ds["trajectory_number"].attrs["standard_name"] = "Trajectory number"
ds["trajectory_number"].attrs["units"] = "1"
ds["grid_number"].attrs["standard_name"] = "Grid number"
ds["grid_number"].attrs["units"] = "1"
ds["age"].attrs["standard_name"] = "Grid number"
ds["age"].attrs["units"] = "1"
ds["lat"].attrs["standard_name"] = "Latitude"
ds["lat"].attrs["units"] = "degree"
ds["lon"].attrs["standard_name"] = "Longitude"
ds["lon"].attrs["units"] = "degree"
ds["alt"].attrs["standard_name"] = "Altitude"
ds["alt"].attrs["units"] = "meter"
return ds
<|code_end|>
examples/io/plot_hysplit.py
<|code_start|>"""
Read and plot a HYSPLIT trajectory file from a HYSPLIT run.
-----------------------------------------------------------
This example shows how to read and plot a backtrajectory calculated by the NOAA
HYSPLIT model over Houston.
Author: Robert Jackson
"""
import act
import matplotlib.pyplot as plt
from arm_test_data import DATASETS
# Load the data
filename = DATASETS.fetch('houstonaug300.0summer2010080100')
ds = act.io.read_hysplit(filename)
# Use the GeographicPlotDisplay object to make the plot
disp = act.plotting.GeographicPlotDisplay(ds)
disp.geoplot('PRESSURE', cartopy_feature=['STATES', 'OCEAN', 'LAND'])
plt.show()
<|code_end|>
|
Add warning to change units when fails
Need to add a warning to the change_units method to indicate when a problem occurs, but not raise an error, since many units strings are not correct or udunits compliant.
Describe what you were trying to get done.
Tell us what happened, what went wrong, and what you expected to happen.
### What I Did
```
Paste the command(s) you ran and the output.
If there was a crash, please include the traceback here.
```
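A minimal usage sketch of the requested behavior, assuming `change_units` gains an opt-in `verbose` keyword (the filename and keyword name are illustrative):

```python
import act

# Placeholder ARM file name for illustration only.
ds = act.io.read_arm_netcdf('sgpmetE13.b1.20190101.000000.cdf')

# Try converting every variable; variables whose units strings cannot be parsed
# are skipped with a printed note instead of raising an error.
ds = ds.utils.change_units(desired_unit='degK', verbose=True)
```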
|
act/io/hysplit.py
<|code_start|>import os
import xarray as xr
import numpy as np
import pandas as pd
from datetime import datetime
from .text import read_csv
def read_hysplit(filename, base_year=2000):
"""
Reads an input HYSPLIT trajectory for plotting in ACT.
Parameters
----------
filename: str
The input file name.
base_year: int
The first year of the century in which the data are contained.
Returns
-------
ds: xarray Dataset
The ACT dataset containing the HYSPLIT trajectories
"""
ds = xr.Dataset({})
num_lines = 0
with open(filename, 'r') as filebuf:
num_grids = int(filebuf.readline().split()[0])
num_lines += 1
grid_times = []
grid_names = []
forecast_hours = np.zeros(num_grids)
for i in range(num_grids):
data = filebuf.readline().split()
num_lines += 1
grid_names.append(data[0])
grid_times.append(
datetime(year=int(data[1]), month=int(data[2]), day=int(data[3]), hour=int(data[4])))
forecast_hours[i] = int(data[5])
ds["grid_forecast_hour"] = xr.DataArray(forecast_hours, dims=["num_grids"])
ds["grid_forecast_hour"].attrs["standard_name"] = "Grid forecast hour"
ds["grid_forecast_hour"].attrs["units"] = "Hour [UTC]"
ds["grid_times"] = xr.DataArray(np.array(grid_times), dims=["num_grids"])
data_line = filebuf.readline().split()
num_lines += 1
ds.attrs["trajectory_direction"] = data_line[1]
ds.attrs["vertical_motion_calculation_method"] = data_line[2]
num_traj = int(data_line[0])
traj_times = []
start_lats = np.zeros(num_traj)
start_lons = np.zeros(num_traj)
start_alt = np.zeros(num_traj)
for i in range(num_traj):
data = filebuf.readline().split()
num_lines += 1
traj_times.append(
datetime(year=(base_year + int(data[0])), month=int(data[1]),
day=int(data[2]), hour=int(data[3])))
start_lats[i] = float(data[4])
start_lons[i] = float(data[5])
start_alt[i] = float(data[6])
ds["start_latitude"] = xr.DataArray(start_lats, dims=["num_trajectories"])
ds["start_latitude"].attrs["long_name"] = "Trajectory start latitude"
ds["start_latitude"].attrs["units"] = "degree"
ds["start_longitude"] = xr.DataArray(start_lats, dims=["num_trajectories"])
ds["start_longitude"].attrs["long_name"] = "Trajectory start longitude"
ds["start_longitude"].attrs["units"] = "degree"
ds["start_altitude"] = xr.DataArray(start_alt, dims=["num_trajectories"])
ds["start_altitude"].attrs["long_name"] = "Trajectory start altitude"
ds["start_altitude"].attrs["units"] = "degree"
data = filebuf.readline().split()
num_lines += 1
var_list = ["trajectory_number", "grid_number", "year", "month", "day",
"hour", "minute", "forecast_hour", "age", "lat", "lon", "alt"]
for variable in data[1:]:
var_list.append(variable)
input_df = pd.read_csv(
filename, sep='\s+', index_col=False, names=var_list, skiprows=12)
input_df['year'] = base_year + input_df['year']
input_df['time'] = pd.to_datetime(input_df[["year", "month", "day", "hour", "minute"]],
format='%y%m%d%H%M')
input_df = input_df.set_index("time")
del input_df["year"]
del input_df["month"]
del input_df["day"]
del input_df["hour"]
del input_df["minute"]
ds = ds.merge(input_df.to_xarray())
ds.attrs['datastream'] = 'hysplit'
ds["trajectory_number"].attrs["standard_name"] = "Trajectory number"
ds["trajectory_number"].attrs["units"] = "1"
ds["grid_number"].attrs["standard_name"] = "Grid number"
ds["grid_number"].attrs["units"] = "1"
ds["age"].attrs["standard_name"] = "Grid number"
ds["age"].attrs["units"] = "1"
ds["lat"].attrs["standard_name"] = "Latitude"
ds["lat"].attrs["units"] = "degree"
ds["lon"].attrs["standard_name"] = "Longitude"
ds["lon"].attrs["units"] = "degree"
ds["alt"].attrs["standard_name"] = "Altitude"
ds["alt"].attrs["units"] = "meter"
return ds
<|code_end|>
act/utils/data_utils.py
<|code_start|>"""
Module containing utilities for the data.
"""
import importlib
import warnings
import json
import metpy
import numpy as np
import pint
import scipy.stats as stats
import xarray as xr
from pathlib import Path
import re
import requests
spec = importlib.util.find_spec('pyart')
if spec is not None:
PYART_AVAILABLE = True
else:
PYART_AVAILABLE = False
@xr.register_dataset_accessor('utils')
class ChangeUnits:
"""
Class for updating units in the dataset. Data values and units attribute
are updated in place. Coordinate variables can not be updated in place. Must
use new returned dataset when updating coordinage varibles.
"""
def __init__(self, ds):
self._ds = ds
def change_units(
self, variables=None, desired_unit=None, skip_variables=None, skip_standard=True
):
"""
Parameters
----------
variables : None, str or list of str
Variable names to attempt to change units.
desired_unit : str
Desired udunits unit string.
skip_variables : None, str or list of str
Variable names to skip. Works well when not providing a variables
keyword.
skip_standard : boolean
Flag indicating the QC variables that will not need changing are
skipped. Makes the processing faster when processing all variables
in dataset.
Returns
-------
dataset : xarray.dataset
A new dataset if the coordinate variables are updated. Required to
use the returned dataset if coordinate variables are updated,
otherwise the dataset is updated in place.
"""
if variables is not None and isinstance(variables, str):
variables = [variables]
if skip_variables is not None and isinstance(skip_variables, str):
skip_variables = [skip_variables]
if desired_unit is None:
raise ValueError("Need to provide 'desired_unit' keyword for .change_units() method")
if variables is None:
variables = list(self._ds.data_vars)
if skip_variables is not None:
variables = list(set(variables) - set(skip_variables))
for var_name in variables:
try:
if self._ds[var_name].attrs['standard_name'] == 'quality_flag':
continue
except KeyError:
pass
try:
data = convert_units(
self._ds[var_name].values,
self._ds[var_name].attrs['units'],
desired_unit,
)
try:
self._ds[var_name].values = data
self._ds[var_name].attrs['units'] = desired_unit
except ValueError:
attrs = self._ds[var_name].attrs
self._ds = self._ds.assign_coords({var_name: data})
attrs['units'] = desired_unit
self._ds[var_name].attrs = attrs
except (
KeyError,
pint.errors.DimensionalityError,
pint.errors.UndefinedUnitError,
np.core._exceptions.UFuncTypeError,
):
continue
return self._ds
# @xr.register_dataset_accessor('utils')
class DatastreamParserARM(object):
'''
Class to parse ARM datastream names or filenames into its components.
Will return None for each attribute if not extracted from the filename.
Attributes
----------
site : str or None
The site code extracted from the filename.
datastream_class : str
The datastream class extracted from the filename.
facility : str or None
The datastream facility code extracted from the filename.
level : str or None
The datastream level code extracted from the filename.
datastream : str or None
The datastream extracted from the filename.
date : str or None
The date extracted from the filename.
time : str or None
The time extracted from the filename.
ext : str or None
The file extension extracted from the filename.
Example
-------
>>> from act.utils.data_utils import DatastreamParserARM
>>> file = 'sgpmetE13.b1.20190501.024254.nc'
>>> fn_obj = DatastreamParserARM(file)
>>> fn_obj.site
'sgp'
>>> fn_obj.datastream_class
'met'
'''
def __init__(self, ds=''):
'''
Constructor that initializes datastream data member and runs
parse_datastream class method. Also converts datastream name to
lower case before parsing.
ds : str
The datastream or filename to parse
'''
if isinstance(ds, str):
self.__datastream = Path(ds).name
else:
raise ValueError('Datastream or filename name must be a string')
try:
self.__parse_datastream()
except ValueError:
self.__site = None
self.__class = None
self.__facility = None
self.__datastream = None
self.__level = None
self.__date = None
self.__time = None
self.__ext = None
def __parse_datastream(self):
'''
Private method to parse datastream name into its various components
(site, class, facility, and data level). Is called automatically by
constructor when object of class is instantiated and when the
set_datastream method is called to reset the object.
'''
# Import the built-in match function from regular expression library
# self.__datastream = self.__datastream
tempstring = self.__datastream.split('.')
# Check to see if ARM-standard filename was passed
self.__ext = None
self.__time = None
self.__date = None
self.__level = None
self.__site = None
self.__class = None
self.__facility = None
if len(tempstring) >= 5:
self.__ext = tempstring[4]
if len(tempstring) >= 4:
self.__time = tempstring[3]
if len(tempstring) >= 3:
self.__date = tempstring[2]
if len(tempstring) >= 2:
m = re.match('[abcs0][0123456789]', tempstring[1])
if m is not None:
self.__level = m.group()
match = False
m = re.search(r'(^[a-z]{3})(\w+)([A-Z]{1}\d{1,2})$', tempstring[0])
if m is not None:
self.__site = m.group(1)
self.__class = m.group(2)
self.__facility = m.group(3)
match = True
if not match:
m = re.search(r'(^[a-z]{3})(\w+)$', tempstring[0])
if m is not None:
self.__site = m.group(1)
self.__class = m.group(2)
match = True
if not match and len(tempstring[0]) == 3:
self.__site = tempstring[0]
match = True
if not match:
raise ValueError(self.__datastream)
def set_datastream(self, ds):
'''
Method used to set or reset object by passing a new datastream name.
'''
self.__init__(ds)
@property
def datastream(self):
'''
Property returning current datastream name stored in object in
standard lower case. Will return the datastream with no level if
unavailable.
'''
try:
return ''.join((self.__site, self.__class, self.__facility, '.',
self.__level))
except TypeError:
return None
@property
def site(self):
'''
Property returning current site name stored in object in standard
lower case.
'''
return self.__site
@property
def datastream_class(self):
'''
Property returning current datastream class name stored in object in
standard lower case. Could not use class as attribute name since it
is a reserved word in Python
'''
return self.__class
@property
def facility(self):
'''
Property returning current facility name stored in object in
standard upper case.
'''
try:
return self.__facility.upper()
except AttributeError:
return self.__facility
@property
def level(self):
'''
Property returning current data level stored in object in standard
lower case.
'''
return self.__level
@property
def datastream_standard(self):
'''
Property returning datastream name in ARM-standard format with
facility in caps. Will return the datastream name with no level if
unavailable.
'''
try:
return ''.join((self.site, self.datastream_class, self.facility,
'.', self.level))
except TypeError:
return None
@property
def date(self):
'''
Property returning date from filename.
'''
return self.__date
@property
def time(self):
'''
Property returning time from filename.
'''
return self.__time
@property
def ext(self):
'''
Property returning file extension from filename.
'''
return self.__ext
def assign_coordinates(ds, coord_list):
"""
This procedure will create a new ACT dataset whose coordinates are
designated to be the variables in a given list. This helps make data
slicing via xarray and visualization easier.
Parameters
----------
ds : ACT Dataset
The ACT Dataset to modify the coordinates of.
coord_list : dict
The list of variables to assign as coordinates, given as a dictionary
whose keys are the variable name and values are the dimension name.
Returns
-------
new_ds : ACT Dataset
The new ACT Dataset with the coordinates assigned to be the given
variables.
"""
# Check to make sure that user assigned valid entries for coordinates
for coord in coord_list.keys():
if coord not in ds.variables.keys():
raise KeyError(coord + ' is not a variable in the Dataset.')
if ds.dims[coord_list[coord]] != len(ds.variables[coord]):
raise IndexError(
coord + ' must have the same ' + 'value as length of ' + coord_list[coord]
)
new_ds_dict = {}
for variable in ds.variables.keys():
my_coord_dict = {}
dataarray = ds[variable]
if len(dataarray.dims) > 0:
for coord in coord_list.keys():
if coord_list[coord] in dataarray.dims:
my_coord_dict[coord_list[coord]] = ds[coord]
if variable not in my_coord_dict.keys() and variable not in ds.dims:
the_dataarray = xr.DataArray(dataarray.data, coords=my_coord_dict, dims=dataarray.dims)
new_ds_dict[variable] = the_dataarray
new_ds = xr.Dataset(new_ds_dict, coords=my_coord_dict)
return new_ds
def add_in_nan(time, data):
"""
This procedure adds in NaNs when there is a larger than expected time step.
This is useful for timeseries where there is a gap in data and need a
NaN value to stop plotting from connecting data over the large data gap.
Parameters
----------
time : 1D array of numpy datetime64 or Xarray DataArray of datetime64
Times in the timeseries.
data : 1D or 2D numpy array or Xarray DataArray
Array containing the data. The 0 axis corresponds to time.
Returns
-------
time : numpy array or Xarray DataArray
The array containing the new times including a NaN filled
sample or slice if multi-dimensional.
The intervals are determined by the mode of the timestep in *time*.
data : numpy array or Xarray DataArray
The array containing the NaN-inserted data.
"""
time_is_DataArray = False
data_is_DataArray = False
if isinstance(time, xr.core.dataarray.DataArray):
time_is_DataArray = True
time_attributes = time.attrs
time_dims = time.dims
if isinstance(data, xr.core.dataarray.DataArray):
data_is_DataArray = True
data_attributes = data.attrs
data_dims = data.dims
# Return if time dimension is only size one since we can't do differences.
if time.size > 2:
data = np.asarray(data)
time = np.asarray(time)
# Not sure if we need to set to second data type to make it work better.
# Leaving code in here in case we need to update.
# diff = np.diff(time.astype('datetime64[s]'), 1)
diff = np.diff(time, 1)
# Wrapping in a try to catch error while switching between numpy 1.10 to 1.11
try:
mode = stats.mode(diff, keepdims=True).mode[0]
except TypeError:
mode = stats.mode(diff).mode[0]
index = np.where(diff > (2.0 * mode))
offset = 0
for i in index[0]:
corr_i = i + offset
if len(data.shape) == 1:
# For line plotting adding a NaN will stop the connection of the line
# between points. So we just need to add a NaN anywhere between the points.
corr_i = i + offset
time_added = time[corr_i] + (time[corr_i + 1] - time[corr_i]) / 2.0
time = np.insert(time, corr_i + 1, time_added)
data = np.insert(data, corr_i + 1, np.nan, axis=0)
offset += 1
else:
# For 2D plots need to add a NaN right after and right before the data
# to correctly mitigate streaking with pcolormesh.
time_added_1 = time[corr_i] + 1 # One time step after
time_added_2 = time[corr_i + 1] - 1 # One time step before
time = np.insert(time, corr_i + 1, [time_added_1, time_added_2])
data = np.insert(data, corr_i + 1, np.nan, axis=0)
data = np.insert(data, corr_i + 2, np.nan, axis=0)
offset += 2
if time_is_DataArray:
time = xr.DataArray(time, attrs=time_attributes, dims=time_dims)
if data_is_DataArray:
data = xr.DataArray(data, attrs=data_attributes, dims=data_dims)
return time, data
def get_missing_value(
ds,
variable,
default=-9999,
add_if_missing_in_ds=False,
use_FillValue=False,
nodefault=False,
):
"""
Function to get missing value from missing_value or _FillValue attribute.
Works well with catching errors and allows for a default value when a
missing value is not listed in the dataset. You may get strange results
because xarray will automatically convert all missing_value or
_FillValue to NaN and then remove the missing_value and
_FillValue variable attribute when reading data with default settings.
Parameters
----------
ds : xarray.Dataset
Xarray dataset containing data variable.
variable : str
Variable name to use for getting missing value.
default : int or float
Default value to use if missing value attribute is not in dataset.
add_if_missing_in_ds : bool
Boolean to add to the dataset if does not exist. Default is False.
use_FillValue : bool
Boolean to use _FillValue instead of missing_value. If missing_value
does exist and _FillValue does not will add _FillValue
set to missing_value value.
nodefault : bool
Option to use this to check if the variable has a missing value set and
do not want to get default as return. If the missing value is found
will return, else will return None.
Returns
-------
missing : scalar int or float (or None)
Value used to indicate missing value matching type of data or None if
nodefault keyword set to True.
Examples
--------
.. code-block:: python
from act.utils import get_missing_value
missing = get_missing_value(dq_ds, "temp_mean")
print(missing)
-9999.0
"""
in_ds = False
if use_FillValue:
missing_atts = ['_FillValue', 'missing_value']
else:
missing_atts = ['missing_value', '_FillValue']
for att in missing_atts:
try:
missing = ds[variable].attrs[att]
in_ds = True
break
except (AttributeError, KeyError):
missing = default
# Check if we do not want a default value returned and a value
# was not found.
if nodefault is True and in_ds is False:
missing = None
return missing
# Check data type and try to match missing_value to the data type of data
try:
missing = ds[variable].data.dtype.type(missing)
except KeyError:
pass
except AttributeError:
print(
('--- AttributeError: Issue trying to get data type ' + 'from "{}" data ---').format(
variable
)
)
# If requested add missing value to the dataset
if add_if_missing_in_ds and not in_ds:
try:
ds[variable].attrs[missing_atts[0]] = missing
except KeyError:
print(
('--- KeyError: Issue trying to add "{}" ' + 'attribute to "{}" ---').format(
missing_atts[0], variable
)
)
return missing
def convert_units(data, in_units, out_units):
"""
Wrapper function around library to convert data using unit strings.
Currently using pint units library. Will attempt to preserve numpy
data type, but will upconvert to numpy float64 if need to change
data type for converted values.
Parameters
----------
data : list, tuple or numpy array
Data array to be modified.
in_units : str
Units scalar string of input data array.
out_units : str
Units scalar string of desired output data array.
Returns
-------
data : numpy array
Data array converted into new units.
Examples
--------
> data = np.array([1,2,3,4,5,6])
> data = convert_units(data, 'cm', 'm')
> data
array([0.01, 0.02, 0.03, 0.04, 0.05, 0.06])
"""
# Fix historical and current incorrect usage of units.
convert_dict = {
'C': 'degC',
'F': 'degF',
'%': 'percent', # Pint does not like this symbol with .to('%')
'1': 'unitless', # Pint does not like a number
}
if in_units in convert_dict:
in_units = convert_dict[in_units]
if out_units in convert_dict:
out_units = convert_dict[out_units]
if in_units == out_units:
return data
# Instantiate the registry
ureg = pint.UnitRegistry(autoconvert_offset_to_baseunit=True)
# Add missing units and conversions
ureg.define('fraction = []')
ureg.define('unitless = []')
if not isinstance(data, np.ndarray):
data = np.array(data)
data_type = data.dtype
data_type_kind = data.dtype.kind
# Do the conversion magic
data = (data * ureg(in_units)).to(out_units)
data = data.magnitude
# The data type may be changed by pint. This is a side effect
# of pint changing the datatype to float. Check if the converted values
# need float precision. If so leave, if not change back to original
# precision after checking if the precision is not lost with the original
# data type.
if (
data_type_kind == 'i'
and np.nanmin(data) >= np.iinfo(data_type).min
and np.nanmax(data) <= np.iinfo(data_type).max
and np.all(np.mod(data, 1) == 0)
):
data = data.astype(data_type)
return data
def ts_weighted_average(ts_dict):
"""
Program to take in multiple difference time-series and average them
using the weights provided. This assumes that the variables passed in
all have the same units. Please see example gallery for an example.
NOTE: All weights should add up to 1
Parameters
----------
ts_dict : dict
Dictionary containing datastream, variable, weight, and datasets
.. code-block:: python
t_dict = {
"sgpvdisC1.b1": {
"variable": "rain_rate",
"weight": 0.05,
"ds": ds,
},
"sgpmetE13.b1": {
"variable": [
"tbrg_precip_total",
"org_precip_rate_mean",
"pwd_precip_rate_mean_1min",
],
"weight": [0.25, 0.05, 0.0125],
},
}
Returns
-------
data : numpy array
Variable of time-series averaged data
"""
# Run through each datastream/variable and get data
da_array = []
data = 0.0
for d in ts_dict:
for i, v in enumerate(ts_dict[d]['variable']):
new_name = '_'.join([d, v])
# Since many variables may have same name, rename with datastream
da = ts_dict[d]['ds'][v].rename(new_name)
# Apply Weights to Data
da.values = da.values * ts_dict[d]['weight'][i]
da_array.append(da)
da = xr.merge(da_array)
# Stack all the data into a 2D time series
data = None
for i, d in enumerate(da):
if i == 0:
data = da[d].values
else:
data = np.vstack((data, da[d].values))
# Sum data across each time sample
data = np.nansum(data, 0)
# Add data to data array and return
dims = ts_dict[list(ts_dict.keys())[0]]['ds'].dims
da_xr = xr.DataArray(
data,
dims=dims,
coords={'time': ts_dict[list(ts_dict.keys())[0]]['ds']['time']},
)
da_xr.attrs['long_name'] = 'Weighted average of ' + ', '.join(list(ts_dict.keys()))
return da_xr
def accumulate_precip(ds, variable, time_delta=None):
"""
Program to accumulate rain rates from an act xarray dataset and insert
variable back into an act xarray dataset with "_accumulated" appended to
the variable name. Please verify that your units are accurately described
in the data.
Parameters
----------
ds : xarray.DataSet
ACT Xarray dataset.
variable : string
Variable name.
time_delta : float
Time delta to calculate precip accumulations over.
Useful if full time series is not passed in.
Returns
-------
ds : xarray.DataSet
ACT Xarray dataset with variable_accumulated.
"""
# Get data, time, and metadata
data = ds[variable]
time = ds.coords['time']
units = ds[variable].attrs['units']
# Calculate mode of the time samples(i.e. 1 min vs 1 sec)
if time_delta is None:
diff = np.diff(time.values, 1) / np.timedelta64(1, 's')
try:
t_delta = stats.mode(diff, keepdims=False).mode
except TypeError:
t_delta = stats.mode(diff).mode
else:
t_delta = time_delta
# Calculate the accumulation based on the units
t_factor = t_delta / 60.0
if units == 'mm/hr':
data = data * (t_factor / 60.0)
accum = np.nancumsum(data.values)
# Add accumulated variable back to the dataset
long_name = 'Accumulated precipitation'
attrs = {'long_name': long_name, 'units': 'mm'}
ds['_'.join([variable, 'accumulated'])] = xr.DataArray(
accum, coords=ds[variable].coords, attrs=attrs
)
return ds
def create_pyart_obj(
ds,
variables=None,
sweep=None,
azimuth=None,
elevation=None,
range_var=None,
sweep_start=None,
sweep_end=None,
lat=None,
lon=None,
alt=None,
sweep_mode='ppi',
sweep_az_thresh=10.0,
sweep_el_thresh=0.5,
):
"""
Produces a Py-ART radar object based on data in the ACT Xarray dataset.
Parameters
----------
ds : xarray.DataSet
ACT Xarray dataset.
variables : list
List of variables to add to the radar object, will default to all
variables.
sweep : string
Name of variable that has sweep information. If none, will try and
calculate from the azimuth and elevation.
azimuth : string
Name of azimuth variable. Will try and find one if none given.
elevation : string
Name of elevation variable. Will try and find one if none given.
range_var : string
Name of the range variable. Will try and find one if none given.
sweep_start : string
Name of variable with sweep start indices.
sweep_end : string
Name of variable with sweep end indices.
lat : string
Name of latitude variable. Will try and find one if none given.
lon : string
Name of longitude variable. Will try and find one if none given.
alt : string
Name of altitude variable. Will try and find one if none given.
sweep_mode : string
Type of scan. Defaults to PPI.
sweep_az_thresh : float
If calculating sweep numbers, the maximum change in azimuth before new
sweep.
sweep_el_thresh : float
If calculating sweep numbers, the maximum change in elevation before
new sweep.
Returns
-------
radar : radar.Radar
Py-ART Radar Object.
"""
if not PYART_AVAILABLE:
raise ImportError(
'Py-ART needs to be installed on your system to convert to ' 'Py-ART Object.'
)
else:
import pyart
# Get list of variables if none provided
if variables is None:
variables = list(ds.keys())
# Determine the sweeps if not already in a variable
if sweep is None:
swp = np.zeros(ds.sizes['time'])
for key in ds.variables.keys():
if len(ds.variables[key].shape) == 2:
total_rays = ds.variables[key].shape[0]
break
nsweeps = int(total_rays / ds.variables['time'].shape[0])
else:
swp = ds[sweep].values
nsweeps = ds[sweep].values
# Get coordinate variables
if lat is None:
lat = [s for s in variables if 'latitude' in s]
if len(lat) == 0:
lat = [s for s in variables if 'lat' in s]
if len(lat) == 0:
raise ValueError(
'Latitude variable not set and could not be ' 'discerned from the data.'
)
else:
lat = lat[0]
if lon is None:
lon = [s for s in variables if 'longitude' in s]
if len(lon) == 0:
lon = [s for s in variables if 'lon' in s]
if len(lon) == 0:
raise ValueError(
'Longitude variable not set and could not be ' 'discerned from the data.'
)
else:
lon = lon[0]
if alt is None:
alt = [s for s in variables if 'altitude' in s]
if len(alt) == 0:
alt = [s for s in variables if 'alt' in s]
if len(alt) == 0:
raise ValueError(
'Altitude variable not set and could not be ' 'discerned from the data.'
)
else:
alt = alt[0]
# Get additional variable names if none provided
if azimuth is None:
azimuth = [s for s in sorted(variables) if 'azimuth' in s][0]
if len(azimuth) == 0:
raise ValueError(
'Azimuth variable not set and could not be ' 'discerned from the data.'
)
if elevation is None:
elevation = [s for s in sorted(variables) if 'elevation' in s][0]
if len(elevation) == 0:
raise ValueError(
'Elevation variable not set and could not be ' 'discerned from the data.'
)
if range_var is None:
range_var = [s for s in sorted(variables) if 'range' in s][0]
if len(range_var) == 0:
raise ValueError('Range variable not set and could not be ' 'discerned from the data.')
# Calculate the sweep indices if not passed in
if sweep_start is None and sweep_end is None:
az_diff = np.abs(np.diff(ds[azimuth].values))
az_idx = az_diff > sweep_az_thresh
el_diff = np.abs(np.diff(ds[elevation].values))
el_idx = el_diff > sweep_el_thresh
# Create index list
az_index = list(np.where(az_idx)[0] + 1)
el_index = list(np.where(el_idx)[0] + 1)
index = sorted(az_index + el_index)
index.insert(0, 0)
index += [ds.sizes['time']]
sweep_start_index = []
sweep_end_index = []
for i in range(len(index) - 1):
sweep_start_index.append(index[i])
sweep_end_index.append(index[i + 1] - 1)
swp[index[i] : index[i + 1]] = i
else:
sweep_start_index = ds[sweep_start].values
sweep_end_index = ds[sweep_end].values
if sweep is None:
for i in range(len(sweep_start_index)):
swp[sweep_start_index[i] : sweep_end_index[i]] = i
radar = pyart.testing.make_empty_ppi_radar(ds.sizes[range_var], ds.sizes['time'], nsweeps)
radar.time['data'] = np.array(ds['time'].values)
# Add lat, lon, and alt
radar.latitude['data'] = np.array(ds[lat].values)
radar.longitude['data'] = np.array(ds[lon].values)
radar.altitude['data'] = np.array(ds[alt].values)
# Add sweep information
radar.sweep_number['data'] = swp
radar.sweep_start_ray_index['data'] = sweep_start_index
radar.sweep_end_ray_index['data'] = sweep_end_index
radar.sweep_mode['data'] = np.array(sweep_mode)
radar.scan_type = sweep_mode
# Add elevation, azimuth, etc...
radar.azimuth['data'] = np.array(ds[azimuth])
radar.elevation['data'] = np.array(ds[elevation])
radar.fixed_angle['data'] = np.array(ds[elevation].values[0])
radar.range['data'] = np.array(ds[range_var].values)
# Calculate radar points in lat/lon
radar.init_gate_altitude()
radar.init_gate_longitude_latitude()
# Add the fields to the radar object
fields = {}
for v in variables:
ref_dict = pyart.config.get_metadata(v)
ref_dict['data'] = np.array(ds[v].values)
fields[v] = ref_dict
radar.fields = fields
return radar
def convert_to_potential_temp(
ds=None,
temp_var_name=None,
press_var_name=None,
temperature=None,
pressure=None,
temp_var_units=None,
press_var_units=None,
):
"""
Converts temperature to potential temperature.
Parameters
----------
ds : xarray.DataSet
ACT Xarray dataset
temp_var_name : str
Temperature variable name in the ACT Xarray dataset containing
temperature data to convert.
press_var_name : str
Pressure variable name in the ACT Xarray dataset containing the
pressure data to use in conversion. If not set or set to None will
use values from pressure keyword.
pressure : int, float, numpy array
Optional pressure values to use instead of using values from xarray
dataset. If set must also set press_var_units keyword.
temp_var_units : string
Pint recognized units string for temperature data. If set to None will
use the units attribute under temperature variable in ds.
press_var_units : string
Pint recognized units string for pressure data. If set to None will
use the units attribute under pressure variable in the dataset. If using
the pressure keyword this must be set.
Returns
-------
potential_temperature : None, int, float, numpy array
The converted temperature to potential temperature or None if something
goes wrong.
References
----------
May, R. M., Arms, S. C., Marsh, P., Bruning, E., Leeman, J. R., Goebbert,
K., Thielen, J. E., and Bruick, Z., 2021: MetPy: A Python Package for
Meteorological Data. Unidata, https://github.com/Unidata/MetPy,
doi:10.5065/D6WW7G29.
"""
potential_temp = None
if temp_var_units is None and temp_var_name is not None:
temp_var_units = ds[temp_var_name].attrs['units']
if press_var_units is None and press_var_name is not None:
press_var_units = ds[press_var_name].attrs['units']
if press_var_units is None:
raise ValueError(
"Need to provide 'press_var_units' keyword " "when using 'pressure' keyword"
)
if temp_var_units is None:
raise ValueError(
"Need to provide 'temp_var_units' keyword " "when using 'temperature' keyword"
)
if temperature is not None:
temperature = metpy.units.units.Quantity(temperature, temp_var_units)
else:
temperature = metpy.units.units.Quantity(ds[temp_var_name].values, temp_var_units)
if pressure is not None:
pressure = metpy.units.units.Quantity(pressure, press_var_units)
else:
pressure = metpy.units.units.Quantity(ds[press_var_name].values, press_var_units)
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=RuntimeWarning)
potential_temp = metpy.calc.potential_temperature(pressure, temperature)
potential_temp = potential_temp.to(temp_var_units).magnitude
return potential_temp
def height_adjusted_temperature(
ds=None,
temp_var_name=None,
height_difference=0,
height_units='m',
press_var_name=None,
temperature=None,
temp_var_units=None,
pressure=101.325,
press_var_units='kPa',
):
"""
Converts temperature for change in height.
Parameters
----------
ds : xarray.DataSet, None
Optional Xarray dataset for retrieving pressure and temperature values.
Not needed if using temperature keyword.
temp_var_name : str, None
Optional temperature variable name in the Xarray dataset containing the
temperature data to use in conversion. If not set or set to None will
use values from temperature keyword.
height_difference : int, float
Required difference in height to adjust pressure values. Positive
values to increase height negative values to decrease height.
height_units : str
Units of height value.
press_var_name : str, None
Optional pressure variable name in the Xarray dataset containing the
pressure data to use in conversion. If not set or set to None will
use values from pressure keyword.
temperature : int, float, numpy array, None
Optional temperature values to use instead of values in the dataset.
temp_var_units : str, None
Pint recognized units string for temperature data. If set to None will
use the units attribute under temperature variable in the dataset.
If using the temperature keyword this must be set.
pressure : int, float, numpy array, None
Optional pressure values to use instead of values in the dataset.
Default value of sea level pressure is set for ease of use.
press_var_units : str, None
Pint recognized units string for pressure data. If set to None will
use the units attribute under pressure variable in the dataset.
If using the pressure keyword this must be set. Default value of
sea level pressure is set for ease of use.
Returns
-------
adjusted_temperature : None, int, float, numpy array
The height adjusted temperature or None if something goes wrong.
References
----------
May, R. M., Arms, S. C., Marsh, P., Bruning, E., Leeman, J. R., Goebbert,
K., Thielen, J. E., and Bruick, Z., 2021: MetPy: A Python Package for
Meteorological Data. Unidata, https://github.com/Unidata/MetPy,
doi:10.5065/D6WW7G29.
"""
adjusted_temperature = None
if temp_var_units is None and temperature is None:
temp_var_units = ds[temp_var_name].attrs['units']
if temp_var_units is None:
raise ValueError(
"Need to provide 'temp_var_units' keyword when " 'providing temperature keyword values.'
)
if temperature is not None:
temperature = metpy.units.units.Quantity(temperature, temp_var_units)
else:
temperature = metpy.units.units.Quantity(ds[temp_var_name].values, temp_var_units)
if press_var_name is not None:
pressure = metpy.units.units.Quantity(ds[press_var_name].values, press_var_units)
else:
pressure = metpy.units.units.Quantity(pressure, press_var_units)
adjusted_pressure = height_adjusted_pressure(
height_difference=height_difference,
height_units=height_units,
pressure=pressure.magnitude,
press_var_units=press_var_units,
)
adjusted_pressure = metpy.units.units.Quantity(adjusted_pressure, press_var_units)
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=RuntimeWarning)
adjusted_temperature = metpy.calc.dry_lapse(adjusted_pressure, temperature, pressure)
adjusted_temperature = adjusted_temperature.to(temp_var_units).magnitude
return adjusted_temperature
def height_adjusted_pressure(
ds=None,
press_var_name=None,
height_difference=0,
height_units='m',
pressure=None,
press_var_units=None,
):
"""
Converts pressure for change in height.
Parameters
----------
ds : xarray.DataSet, None
Optional Xarray dataset for retrieving pressure values. Not needed if
using pressure keyword.
press_var_name : str, None
Optional pressure variable name in the Xarray dataset containing the
pressure data to use in conversion. If not set or set to None will
use values from pressure keyword.
height_difference : int, float
Required difference in height to adjust pressure values. Positive
values to increase height negative values to decrease height.
height_units : str
Units of height value.
pressure : int, float, numpy array, None
Optional pressure values to use instead of values in the dataset.
press_var_units : str, None
Pint recognized units string for pressure data. If set to None will
use the units attribute under pressure variable in the dataset.
If using the pressure keyword this must be set.
Returns
-------
adjusted_pressure : None, int, float, numpy array
The height adjusted pressure or None if something goes wrong.
References
----------
May, R. M., Arms, S. C., Marsh, P., Bruning, E., Leeman, J. R., Goebbert,
K., Thielen, J. E., and Bruick, Z., 2021: MetPy: A Python Package for
Meteorological Data. Unidata, https://github.com/Unidata/MetPy,
doi:10.5065/D6WW7G29.
"""
adjusted_pressure = None
if press_var_units is None and pressure is None:
press_var_units = ds[press_var_name].attrs['units']
if press_var_units is None:
raise ValueError(
"Need to provide 'press_var_units' keyword when " 'providing pressure keyword values.'
)
if pressure is not None:
pressure = metpy.units.units.Quantity(pressure, press_var_units)
else:
pressure = metpy.units.units.Quantity(ds[press_var_name].values, press_var_units)
height_difference = metpy.units.units.Quantity(height_difference, height_units)
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=RuntimeWarning)
adjusted_pressure = metpy.calc.add_height_to_pressure(pressure, height_difference)
adjusted_pressure = adjusted_pressure.to(press_var_units).magnitude
return adjusted_pressure
def arm_site_location_search(site_code='sgp', facility_code=None):
"""
Parameters
----------
site_code : str
ARM site code to retrieve facilities and coordinate information. Example and default
is 'sgp'.
facility_code : str or None
Facility code or codes for the ARM site provided. If None is provided, all facilities are returned.
Example string for multiple facilities is 'A4,I5'.
Returns
-------
coord_dict : dict
A dictionary containing the facility chosen coordinate information or all facilities
if None for facility_code and their respective coordinates.
"""
headers = {
'Content-Type': 'application/json',
}
# Return all facilities if facility_code is None else set the query to include
# facility search
if facility_code is None:
query = "site_code:" + site_code
else:
query = "site_code:" + site_code + " AND facility_code:" + facility_code
# Search aggregation for elastic search
json_data = {
"aggs": {
"distinct_facility_code": {
"terms": {
"field": "facility_code.keyword",
"order": {
"_key": "asc"
},
"size": 7000,
},
"aggs": {
"hits": {
"top_hits": {
"_source": [
"site_type",
"site_code",
"facility_code",
"location",
],
"size": 1
},
},
},
},
},
"size": 0,
"query": {
"query_string": {
"query": query,
},
},
}
# Uses requests to grab metadata from arm.gov.
response = requests.get('https://adc.arm.gov/elastic/metadata/_search', headers=headers, json=json_data)
# Loads the text to a dictionary
response_dict = json.loads(response.text)
# Searches dictionary for the site, facility and coordinate information.
coord_dict = {}
# Loop through each facility.
for i in range(len(response_dict['aggregations']['distinct_facility_code']['buckets'])):
site_info = response_dict['aggregations']['distinct_facility_code']['buckets'][i]['hits']['hits']['hits'][0]['_source']
site = site_info['site_code']
facility = site_info['facility_code']
# Some sites do not contain coordinate information, return None if that is the case.
if site_info['location'] is None:
coords = {'latitude': None,
'longitude': None}
else:
lat, lon = site_info['location'].split(',')
lat = float(lat)
lon = float(lon)
coords = {'latitude': lat,
'longitude': lon}
coord_dict.setdefault(site + ' ' + facility, coords)
return coord_dict
<|code_end|>
examples/plotting/plot_scatter.py
<|code_start|>"""
Compare Aircraft Airspeeds
--------------------------
Compare Aircraft Airspeeds via the DistributionDisplay
Scatter Plot
Written: Joe O'Brien
"""
from arm_test_data import DATASETS
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats.mstats import pearsonr
import act
from act.io.icartt import read_icartt
# Call the read_icartt function, which supports input
# for ICARTT (v2.0) formatted files.
# Example file is ARM Aerial Facility Navigation Data
filename_icartt = DATASETS.fetch('AAFNAV_COR_20181104_R0.ict')
ds = read_icartt(filename_icartt)
# Create a DistributionDisplay object to compare fields
display = act.plotting.DistributionDisplay(ds)
# Compare aircraft ground speed with indicated airspeed
display.plot_scatter('true_airspeed',
'ground_speed',
m_field='ambient_temp',
marker='x',
cbar_label='Ambient Temperature ($^\circ$C)'
)
# Set the range of the field on the x-axis
display.set_xrng((40, 140))
display.set_yrng((40, 140))
# Determine the best fit line
z = np.ma.polyfit(ds['true_airspeed'],
ds['ground_speed'],
1
)
p = np.poly1d(z)
# Plot the best fit line
display.axes[0].plot(ds['true_airspeed'],
p(ds['true_airspeed']),
'r',
linewidth=2
)
# Display the line equation
display.axes[0].text(45,
135,
"y = %.3fx + (%.3f)" % (z[0], z[1]),
color='r',
fontsize=12
)
# Calculate Pearson Correlation Coefficient
cc_conc = pearsonr(ds['true_airspeed'],
ds['ground_speed']
)
# Display the Pearson CC
display.axes[0].text(45,
130,
"Pearson CC: %.2f" % (cc_conc[0]),
fontsize=12
)
# Display the total number of samples
display.axes[0].text(45,
125,
"N = %.0f" % (ds['true_airspeed'].data.shape[0]),
fontsize=12
)
# Display the 1:1 ratio line
display.set_ratio_line()
plt.show()
<|code_end|>
|
act/io/hysplit.py
<|code_start|>import os
import xarray as xr
import numpy as np
import pandas as pd
from datetime import datetime
from .text import read_csv
def read_hysplit(filename, base_year=2000):
"""
Reads an input HYSPLIT trajectory for plotting in ACT.
Parameters
----------
filename: str
The input file name.
base_year: int
The first year of the century in which the data are contained.
Returns
-------
ds: xarray Dataset
The ACT dataset containing the HYSPLIT trajectories
"""
ds = xr.Dataset({})
num_lines = 0
with open(filename, 'r') as filebuf:
num_grids = int(filebuf.readline().split()[0])
num_lines += 1
grid_times = []
grid_names = []
forecast_hours = np.zeros(num_grids)
for i in range(num_grids):
data = filebuf.readline().split()
num_lines += 1
grid_names.append(data[0])
grid_times.append(
datetime(year=int(data[1]), month=int(data[2]), day=int(data[3]), hour=int(data[4])))
forecast_hours[i] = int(data[5])
ds["grid_forecast_hour"] = xr.DataArray(forecast_hours, dims=["num_grids"])
ds["grid_forecast_hour"].attrs["standard_name"] = "Grid forecast hour"
ds["grid_forecast_hour"].attrs["units"] = "Hour [UTC]"
ds["grid_times"] = xr.DataArray(np.array(grid_times), dims=["num_grids"])
data_line = filebuf.readline().split()
num_lines += 1
ds.attrs["trajectory_direction"] = data_line[1]
ds.attrs["vertical_motion_calculation_method"] = data_line[2]
num_traj = int(data_line[0])
traj_times = []
start_lats = np.zeros(num_traj)
start_lons = np.zeros(num_traj)
start_alt = np.zeros(num_traj)
for i in range(num_traj):
data = filebuf.readline().split()
num_lines += 1
traj_times.append(
datetime(year=(base_year + int(data[0])), month=int(data[1]),
day=int(data[2]), hour=int(data[3])))
start_lats[i] = float(data[4])
start_lons[i] = float(data[5])
start_alt[i] = float(data[6])
ds["start_latitude"] = xr.DataArray(start_lats, dims=["num_trajectories"])
ds["start_latitude"].attrs["long_name"] = "Trajectory start latitude"
ds["start_latitude"].attrs["units"] = "degree"
ds["start_longitude"] = xr.DataArray(start_lats, dims=["num_trajectories"])
ds["start_longitude"].attrs["long_name"] = "Trajectory start longitude"
ds["start_longitude"].attrs["units"] = "degree"
ds["start_altitude"] = xr.DataArray(start_alt, dims=["num_trajectories"])
ds["start_altitude"].attrs["long_name"] = "Trajectory start altitude"
ds["start_altitude"].attrs["units"] = "degree"
data = filebuf.readline().split()
num_lines += 1
var_list = ["trajectory_number", "grid_number", "year", "month", "day",
"hour", "minute", "forecast_hour", "age", "lat", "lon", "alt"]
for variable in data[1:]:
var_list.append(variable)
input_df = pd.read_csv(
filename, sep='\s+', index_col=False, names=var_list, skiprows=12) # noqa W605
input_df['year'] = base_year + input_df['year']
input_df['time'] = pd.to_datetime(input_df[["year", "month", "day", "hour", "minute"]],
format='%y%m%d%H%M')
input_df = input_df.set_index("time")
del input_df["year"]
del input_df["month"]
del input_df["day"]
del input_df["hour"]
del input_df["minute"]
ds = ds.merge(input_df.to_xarray())
ds.attrs['datastream'] = 'hysplit'
ds["trajectory_number"].attrs["standard_name"] = "Trajectory number"
ds["trajectory_number"].attrs["units"] = "1"
ds["grid_number"].attrs["standard_name"] = "Grid number"
ds["grid_number"].attrs["units"] = "1"
ds["age"].attrs["standard_name"] = "Grid number"
ds["age"].attrs["units"] = "1"
ds["lat"].attrs["standard_name"] = "Latitude"
ds["lat"].attrs["units"] = "degree"
ds["lon"].attrs["standard_name"] = "Longitude"
ds["lon"].attrs["units"] = "degree"
ds["alt"].attrs["standard_name"] = "Altitude"
ds["alt"].attrs["units"] = "meter"
return ds
<|code_end|>
act/utils/data_utils.py
<|code_start|>"""
Module containing utilities for the data.
"""
import importlib
import warnings
import json
import metpy
import numpy as np
import pint
import scipy.stats as stats
import xarray as xr
from pathlib import Path
import re
import requests
spec = importlib.util.find_spec('pyart')
if spec is not None:
PYART_AVAILABLE = True
else:
PYART_AVAILABLE = False
@xr.register_dataset_accessor('utils')
class ChangeUnits:
"""
Class for updating units in the dataset. Data values and units attribute
are updated in place. Coordinate variables cannot be updated in place. Must
use the new returned dataset when updating coordinate variables.
"""
def __init__(self, ds):
self._ds = ds
def change_units(
self, variables=None, desired_unit=None, skip_variables=None, skip_standard=True,
verbose=False, raise_error=False
):
"""
Parameters
----------
variables : None, str or list of str
Variable names to attempt to change units.
desired_unit : str
Desired udunits unit string.
skip_variables : None, str or list of str
Variable names to skip. Works well when not providing a variables
keyword.
skip_standard : boolean
Flag indicating the QC variables that will not need changing are
skipped. Makes the processing faster when processing all variables
in dataset.
verbose : boolean
Option to print a statement when an attempted conversion fails. Set to False
by default because many units strings are not udunits compliant and, when
trying to convert all variables of a given unit type (e.g. temperature), the code
can print a lot of unnecessary information.
raise_error : boolean
Raise an error if conversion is not successful.
Returns
-------
dataset : xarray.dataset
A new dataset if the coordinate variables are updated. Required to
use the returned dataset if coordinate variables are updated,
otherwise the dataset is updated in place.
"""
if variables is not None and isinstance(variables, str):
variables = [variables]
if skip_variables is not None and isinstance(skip_variables, str):
skip_variables = [skip_variables]
if desired_unit is None:
raise ValueError("Need to provide 'desired_unit' keyword for .change_units() method")
if variables is None:
variables = list(self._ds.data_vars)
if skip_variables is not None:
variables = list(set(variables) - set(skip_variables))
for var_name in variables:
try:
if self._ds[var_name].attrs['standard_name'] == 'quality_flag':
continue
except KeyError:
pass
try:
data = convert_units(
self._ds[var_name].values,
self._ds[var_name].attrs['units'],
desired_unit,
)
try:
self._ds[var_name].values = data
self._ds[var_name].attrs['units'] = desired_unit
except ValueError:
attrs = self._ds[var_name].attrs
self._ds = self._ds.assign_coords({var_name: data})
attrs['units'] = desired_unit
self._ds[var_name].attrs = attrs
except (
KeyError,
pint.errors.DimensionalityError,
pint.errors.UndefinedUnitError,
np.core._exceptions.UFuncTypeError,
):
if raise_error:
raise ValueError(f"Unable to convert '{var_name}' to units of '{desired_unit}'.")
elif verbose:
print(f"\n Unable to convert '{var_name}' to units of '{desired_unit}'. "
f"Skipping unit converstion for '{var_name}'.\n")
return self._ds
# @xr.register_dataset_accessor('utils')
class DatastreamParserARM(object):
'''
Class to parse ARM datastream names or filenames into its components.
Will return None for each attribute if not extracted from the filename.
Attributes
----------
site : str or None
The site code extracted from the filename.
datastream_class : str
The datastream class extracted from the filename.
facility : str or None
The datastream facility code extracted from the filename.
level : str or None
The datastream level code extracted from the filename.
datastream : str or None
The datastream extracted from the filename.
date : str or None
The date extracted from the filename.
time : str or None
The time extracted from the filename.
ext : str or None
The file extension extracted from the filename.
Example
-------
>>> from act.utils.data_utils import DatastreamParserARM
>>> file = 'sgpmetE13.b1.20190501.024254.nc'
>>> fn_obj = DatastreamParserARM(file)
>>> fn_obj.site
'sgp'
>>> fn_obj.datastream_class
'met'
'''
def __init__(self, ds=''):
'''
Constructor that initializes datastream data member and runs
parse_datastream class method. Also converts datastream name to
lower case before parsing.
ds : str
The datastream or filename to parse
'''
if isinstance(ds, str):
self.__datastream = Path(ds).name
else:
raise ValueError('Datastream or filename name must be a string')
try:
self.__parse_datastream()
except ValueError:
self.__site = None
self.__class = None
self.__facility = None
self.__datastream = None
self.__level = None
self.__date = None
self.__time = None
self.__ext = None
def __parse_datastream(self):
'''
Private method to parse datastream name into its various components
(site, class, facility, and data level). Is called automatically by
constructor when object of class is instantiated and when the
set_datastream method is called to reset the object.
'''
# Import the built-in match function from regular expression library
# self.__datastream = self.__datastream
tempstring = self.__datastream.split('.')
# Check to see if ARM-standard filename was passed
self.__ext = None
self.__time = None
self.__date = None
self.__level = None
self.__site = None
self.__class = None
self.__facility = None
if len(tempstring) >= 5:
self.__ext = tempstring[4]
if len(tempstring) >= 4:
self.__time = tempstring[3]
if len(tempstring) >= 3:
self.__date = tempstring[2]
if len(tempstring) >= 2:
m = re.match('[abcs0][0123456789]', tempstring[1])
if m is not None:
self.__level = m.group()
match = False
m = re.search(r'(^[a-z]{3})(\w+)([A-Z]{1}\d{1,2})$', tempstring[0])
if m is not None:
self.__site = m.group(1)
self.__class = m.group(2)
self.__facility = m.group(3)
match = True
if not match:
m = re.search(r'(^[a-z]{3})(\w+)$', tempstring[0])
if m is not None:
self.__site = m.group(1)
self.__class = m.group(2)
match = True
if not match and len(tempstring[0]) == 3:
self.__site = tempstring[0]
match = True
if not match:
raise ValueError(self.__datastream)
def set_datastream(self, ds):
'''
Method used to set or reset object by passing a new datastream name.
'''
self.__init__(ds)
@property
def datastream(self):
'''
Property returning current datastream name stored in object in
        standard lower case. Will return the datastream with no level if
unavailable.
'''
try:
return ''.join((self.__site, self.__class, self.__facility, '.',
self.__level))
except TypeError:
return None
@property
def site(self):
'''
Property returning current site name stored in object in standard
lower case.
'''
return self.__site
@property
def datastream_class(self):
'''
Property returning current datastream class name stored in object in
standard lower case. Could not use class as attribute name since it
is a reserved word in Python
'''
return self.__class
@property
def facility(self):
'''
Property returning current facility name stored in object in
standard upper case.
'''
try:
return self.__facility.upper()
except AttributeError:
return self.__facility
@property
def level(self):
'''
Property returning current data level stored in object in standard
lower case.
'''
return self.__level
@property
def datastream_standard(self):
'''
Property returning datastream name in ARM-standard format with
facility in caps. Will return the datastream name with no level if
unavailable.
'''
try:
return ''.join((self.site, self.datastream_class, self.facility,
'.', self.level))
except TypeError:
return None
@property
def date(self):
'''
Property returning date from filename.
'''
return self.__date
@property
def time(self):
'''
Property returning time from filename.
'''
return self.__time
@property
def ext(self):
'''
Property returning file extension from filename.
'''
return self.__ext
def assign_coordinates(ds, coord_list):
"""
This procedure will create a new ACT dataset whose coordinates are
designated to be the variables in a given list. This helps make data
slicing via xarray and visualization easier.
Parameters
----------
ds : ACT Dataset
The ACT Dataset to modify the coordinates of.
coord_list : dict
The list of variables to assign as coordinates, given as a dictionary
whose keys are the variable name and values are the dimension name.
Returns
-------
new_ds : ACT Dataset
The new ACT Dataset with the coordinates assigned to be the given
variables.
"""
# Check to make sure that user assigned valid entries for coordinates
for coord in coord_list.keys():
if coord not in ds.variables.keys():
raise KeyError(coord + ' is not a variable in the Dataset.')
if ds.dims[coord_list[coord]] != len(ds.variables[coord]):
raise IndexError(
coord + ' must have the same ' + 'value as length of ' + coord_list[coord]
)
new_ds_dict = {}
for variable in ds.variables.keys():
my_coord_dict = {}
dataarray = ds[variable]
if len(dataarray.dims) > 0:
for coord in coord_list.keys():
if coord_list[coord] in dataarray.dims:
my_coord_dict[coord_list[coord]] = ds[coord]
if variable not in my_coord_dict.keys() and variable not in ds.dims:
the_dataarray = xr.DataArray(dataarray.data, coords=my_coord_dict, dims=dataarray.dims)
new_ds_dict[variable] = the_dataarray
new_ds = xr.Dataset(new_ds_dict, coords=my_coord_dict)
return new_ds
def add_in_nan(time, data):
"""
This procedure adds in NaNs when there is a larger than expected time step.
This is useful for timeseries where there is a gap in data and need a
NaN value to stop plotting from connecting data over the large data gap.
Parameters
----------
time : 1D array of numpy datetime64 or Xarray DataArray of datetime64
Times in the timeseries.
data : 1D or 2D numpy array or Xarray DataArray
Array containing the data. The 0 axis corresponds to time.
Returns
-------
time : numpy array or Xarray DataArray
The array containing the new times including a NaN filled
        sample or slice if multi-dimensional.
The intervals are determined by the mode of the timestep in *time*.
data : numpy array or Xarray DataArray
        The array containing the NaN-inserted data.
"""
time_is_DataArray = False
data_is_DataArray = False
if isinstance(time, xr.core.dataarray.DataArray):
time_is_DataArray = True
time_attributes = time.attrs
time_dims = time.dims
if isinstance(data, xr.core.dataarray.DataArray):
data_is_DataArray = True
data_attributes = data.attrs
data_dims = data.dims
# Return if time dimension is only size one since we can't do differences.
if time.size > 2:
data = np.asarray(data)
time = np.asarray(time)
# Not sure if we need to set to second data type to make it work better.
# Leaving code in here in case we need to update.
# diff = np.diff(time.astype('datetime64[s]'), 1)
diff = np.diff(time, 1)
# Wrapping in a try to catch error while switching between numpy 1.10 to 1.11
try:
mode = stats.mode(diff, keepdims=True).mode[0]
except TypeError:
mode = stats.mode(diff).mode[0]
index = np.where(diff > (2.0 * mode))
offset = 0
for i in index[0]:
corr_i = i + offset
if len(data.shape) == 1:
# For line plotting adding a NaN will stop the connection of the line
# between points. So we just need to add a NaN anywhere between the points.
corr_i = i + offset
time_added = time[corr_i] + (time[corr_i + 1] - time[corr_i]) / 2.0
time = np.insert(time, corr_i + 1, time_added)
data = np.insert(data, corr_i + 1, np.nan, axis=0)
offset += 1
else:
# For 2D plots need to add a NaN right after and right before the data
# to correctly mitigate streaking with pcolormesh.
time_added_1 = time[corr_i] + 1 # One time step after
time_added_2 = time[corr_i + 1] - 1 # One time step before
time = np.insert(time, corr_i + 1, [time_added_1, time_added_2])
data = np.insert(data, corr_i + 1, np.nan, axis=0)
data = np.insert(data, corr_i + 2, np.nan, axis=0)
offset += 2
if time_is_DataArray:
time = xr.DataArray(time, attrs=time_attributes, dims=time_dims)
if data_is_DataArray:
data = xr.DataArray(data, attrs=data_attributes, dims=data_dims)
return time, data
def get_missing_value(
ds,
variable,
default=-9999,
add_if_missing_in_ds=False,
use_FillValue=False,
nodefault=False,
):
"""
Function to get missing value from missing_value or _FillValue attribute.
Works well with catching errors and allows for a default value when a
missing value is not listed in the dataset. You may get strange results
    because xarray will automatically convert all missing_value or
_FillValue to NaN and then remove the missing_value and
_FillValue variable attribute when reading data with default settings.
Parameters
----------
ds : xarray.Dataset
Xarray dataset containing data variable.
variable : str
Variable name to use for getting missing value.
default : int or float
Default value to use if missing value attribute is not in dataset.
add_if_missing_in_ds : bool
Boolean to add to the dataset if does not exist. Default is False.
use_FillValue : bool
Boolean to use _FillValue instead of missing_value. If missing_value
does exist and _FillValue does not will add _FillValue
set to missing_value value.
nodefault : bool
        Option to check whether the variable has a missing value set when
        you do not want the default returned. If the missing value is found
        it will be returned, otherwise None will be returned.
Returns
-------
missing : scalar int or float (or None)
Value used to indicate missing value matching type of data or None if
nodefault keyword set to True.
Examples
--------
.. code-block:: python
from act.utils import get_missing_value
missing = get_missing_value(dq_ds, "temp_mean")
print(missing)
-9999.0
"""
in_ds = False
if use_FillValue:
missing_atts = ['_FillValue', 'missing_value']
else:
missing_atts = ['missing_value', '_FillValue']
for att in missing_atts:
try:
missing = ds[variable].attrs[att]
in_ds = True
break
except (AttributeError, KeyError):
missing = default
    # Check if a default value is not wanted and a missing value
    # was not found.
if nodefault is True and in_ds is False:
missing = None
return missing
# Check data type and try to match missing_value to the data type of data
try:
missing = ds[variable].data.dtype.type(missing)
except KeyError:
pass
except AttributeError:
print(
('--- AttributeError: Issue trying to get data type ' + 'from "{}" data ---').format(
variable
)
)
# If requested add missing value to the dataset
if add_if_missing_in_ds and not in_ds:
try:
ds[variable].attrs[missing_atts[0]] = missing
except KeyError:
print(
('--- KeyError: Issue trying to add "{}" ' + 'attribute to "{}" ---').format(
missing_atts[0], variable
)
)
return missing
def convert_units(data, in_units, out_units):
"""
Wrapper function around library to convert data using unit strings.
Currently using pint units library. Will attempt to preserve numpy
data type, but will upconvert to numpy float64 if need to change
data type for converted values.
Parameters
----------
data : list, tuple or numpy array
Data array to be modified.
in_units : str
Units scalar string of input data array.
out_units : str
Units scalar string of desired output data array.
Returns
-------
data : numpy array
Data array converted into new units.
Examples
--------
    >>> data = np.array([1,2,3,4,5,6])
    >>> data = convert_units(data, 'cm', 'm')
    >>> data
    array([0.01, 0.02, 0.03, 0.04, 0.05, 0.06])
"""
# Fix historical and current incorrect usage of units.
convert_dict = {
'C': 'degC',
'F': 'degF',
'%': 'percent', # Pint does not like this symbol with .to('%')
'1': 'unitless', # Pint does not like a number
}
if in_units in convert_dict:
in_units = convert_dict[in_units]
if out_units in convert_dict:
out_units = convert_dict[out_units]
if in_units == out_units:
return data
# Instantiate the registry
ureg = pint.UnitRegistry(autoconvert_offset_to_baseunit=True)
# Add missing units and conversions
ureg.define('fraction = []')
ureg.define('unitless = []')
if not isinstance(data, np.ndarray):
data = np.array(data)
data_type = data.dtype
data_type_kind = data.dtype.kind
# Do the conversion magic
data = (data * ureg(in_units)).to(out_units)
data = data.magnitude
# The data type may be changed by pint. This is a side effect
# of pint changing the datatype to float. Check if the converted values
    # need float precision. If so leave, if not change back to original
    # precision after checking if the precision is not lost with the original
# data type.
if (
data_type_kind == 'i'
and np.nanmin(data) >= np.iinfo(data_type).min
and np.nanmax(data) <= np.iinfo(data_type).max
and np.all(np.mod(data, 1) == 0)
):
data = data.astype(data_type)
return data
def ts_weighted_average(ts_dict):
"""
    Program to take in multiple different time-series and average them
using the weights provided. This assumes that the variables passed in
all have the same units. Please see example gallery for an example.
NOTE: All weights should add up to 1
Parameters
----------
ts_dict : dict
Dictionary containing datastream, variable, weight, and datasets
.. code-block:: python
t_dict = {
"sgpvdisC1.b1": {
"variable": "rain_rate",
"weight": 0.05,
"ds": ds,
},
"sgpmetE13.b1": {
"variable": [
"tbrg_precip_total",
"org_precip_rate_mean",
"pwd_precip_rate_mean_1min",
],
"weight": [0.25, 0.05, 0.0125],
},
}
Returns
-------
data : numpy array
Variable of time-series averaged data
"""
# Run through each datastream/variable and get data
da_array = []
data = 0.0
for d in ts_dict:
for i, v in enumerate(ts_dict[d]['variable']):
new_name = '_'.join([d, v])
# Since many variables may have same name, rename with datastream
da = ts_dict[d]['ds'][v].rename(new_name)
# Apply Weights to Data
da.values = da.values * ts_dict[d]['weight'][i]
da_array.append(da)
da = xr.merge(da_array)
# Stack all the data into a 2D time series
data = None
for i, d in enumerate(da):
if i == 0:
data = da[d].values
else:
data = np.vstack((data, da[d].values))
# Sum data across each time sample
data = np.nansum(data, 0)
# Add data to data array and return
dims = ts_dict[list(ts_dict.keys())[0]]['ds'].dims
da_xr = xr.DataArray(
data,
dims=dims,
coords={'time': ts_dict[list(ts_dict.keys())[0]]['ds']['time']},
)
da_xr.attrs['long_name'] = 'Weighted average of ' + ', '.join(list(ts_dict.keys()))
return da_xr
def accumulate_precip(ds, variable, time_delta=None):
"""
Program to accumulate rain rates from an act xarray dataset and insert
variable back into an act xarray dataset with "_accumulated" appended to
the variable name. Please verify that your units are accurately described
in the data.
Parameters
----------
ds : xarray.DataSet
ACT Xarray dataset.
variable : string
Variable name.
time_delta : float
        Time delta to calculate precip accumulations over.
Useful if full time series is not passed in.
Returns
-------
ds : xarray.DataSet
ACT Xarray dataset with variable_accumulated.
"""
    # Get data, time, and metadata
data = ds[variable]
time = ds.coords['time']
units = ds[variable].attrs['units']
# Calculate mode of the time samples(i.e. 1 min vs 1 sec)
if time_delta is None:
diff = np.diff(time.values, 1) / np.timedelta64(1, 's')
try:
t_delta = stats.mode(diff, keepdims=False).mode
except TypeError:
t_delta = stats.mode(diff).mode
else:
t_delta = time_delta
# Calculate the accumulation based on the units
t_factor = t_delta / 60.0
if units == 'mm/hr':
data = data * (t_factor / 60.0)
accum = np.nancumsum(data.values)
# Add accumulated variable back to the dataset
long_name = 'Accumulated precipitation'
attrs = {'long_name': long_name, 'units': 'mm'}
ds['_'.join([variable, 'accumulated'])] = xr.DataArray(
accum, coords=ds[variable].coords, attrs=attrs
)
return ds
def create_pyart_obj(
ds,
variables=None,
sweep=None,
azimuth=None,
elevation=None,
range_var=None,
sweep_start=None,
sweep_end=None,
lat=None,
lon=None,
alt=None,
sweep_mode='ppi',
sweep_az_thresh=10.0,
sweep_el_thresh=0.5,
):
"""
Produces a Py-ART radar object based on data in the ACT Xarray dataset.
Parameters
----------
ds : xarray.DataSet
ACT Xarray dataset.
variables : list
List of variables to add to the radar object, will default to all
variables.
sweep : string
Name of variable that has sweep information. If none, will try and
calculate from the azimuth and elevation.
azimuth : string
Name of azimuth variable. Will try and find one if none given.
elevation : string
Name of elevation variable. Will try and find one if none given.
range_var : string
Name of the range variable. Will try and find one if none given.
sweep_start : string
Name of variable with sweep start indices.
sweep_end : string
Name of variable with sweep end indices.
lat : string
Name of latitude variable. Will try and find one if none given.
lon : string
Name of longitude variable. Will try and find one if none given.
alt : string
Name of altitude variable. Will try and find one if none given.
sweep_mode : string
Type of scan. Defaults to PPI.
sweep_az_thresh : float
If calculating sweep numbers, the maximum change in azimuth before new
sweep.
sweep_el_thresh : float
If calculating sweep numbers, the maximum change in elevation before
new sweep.
Returns
-------
radar : radar.Radar
Py-ART Radar Object.
"""
if not PYART_AVAILABLE:
raise ImportError(
'Py-ART needs to be installed on your system to convert to ' 'Py-ART Object.'
)
else:
import pyart
# Get list of variables if none provided
if variables is None:
variables = list(ds.keys())
    # Determine the sweeps if not already in a variable
if sweep is None:
swp = np.zeros(ds.sizes['time'])
for key in ds.variables.keys():
if len(ds.variables[key].shape) == 2:
total_rays = ds.variables[key].shape[0]
break
nsweeps = int(total_rays / ds.variables['time'].shape[0])
else:
swp = ds[sweep].values
nsweeps = ds[sweep].values
# Get coordinate variables
if lat is None:
lat = [s for s in variables if 'latitude' in s]
if len(lat) == 0:
lat = [s for s in variables if 'lat' in s]
if len(lat) == 0:
raise ValueError(
'Latitude variable not set and could not be ' 'discerned from the data.'
)
else:
lat = lat[0]
if lon is None:
lon = [s for s in variables if 'longitude' in s]
if len(lon) == 0:
lon = [s for s in variables if 'lon' in s]
if len(lon) == 0:
raise ValueError(
'Longitude variable not set and could not be ' 'discerned from the data.'
)
else:
lon = lon[0]
if alt is None:
alt = [s for s in variables if 'altitude' in s]
if len(alt) == 0:
alt = [s for s in variables if 'alt' in s]
if len(alt) == 0:
raise ValueError(
'Altitude variable not set and could not be ' 'discerned from the data.'
)
else:
alt = alt[0]
# Get additional variable names if none provided
if azimuth is None:
azimuth = [s for s in sorted(variables) if 'azimuth' in s][0]
if len(azimuth) == 0:
raise ValueError(
'Azimuth variable not set and could not be ' 'discerned from the data.'
)
if elevation is None:
elevation = [s for s in sorted(variables) if 'elevation' in s][0]
if len(elevation) == 0:
raise ValueError(
'Elevation variable not set and could not be ' 'discerned from the data.'
)
if range_var is None:
range_var = [s for s in sorted(variables) if 'range' in s][0]
if len(range_var) == 0:
raise ValueError('Range variable not set and could not be ' 'discerned from the data.')
# Calculate the sweep indices if not passed in
if sweep_start is None and sweep_end is None:
az_diff = np.abs(np.diff(ds[azimuth].values))
az_idx = az_diff > sweep_az_thresh
el_diff = np.abs(np.diff(ds[elevation].values))
el_idx = el_diff > sweep_el_thresh
# Create index list
az_index = list(np.where(az_idx)[0] + 1)
el_index = list(np.where(el_idx)[0] + 1)
index = sorted(az_index + el_index)
index.insert(0, 0)
index += [ds.sizes['time']]
sweep_start_index = []
sweep_end_index = []
for i in range(len(index) - 1):
sweep_start_index.append(index[i])
sweep_end_index.append(index[i + 1] - 1)
swp[index[i] : index[i + 1]] = i
else:
sweep_start_index = ds[sweep_start].values
sweep_end_index = ds[sweep_end].values
if sweep is None:
for i in range(len(sweep_start_index)):
swp[sweep_start_index[i] : sweep_end_index[i]] = i
radar = pyart.testing.make_empty_ppi_radar(ds.sizes[range_var], ds.sizes['time'], nsweeps)
radar.time['data'] = np.array(ds['time'].values)
# Add lat, lon, and alt
radar.latitude['data'] = np.array(ds[lat].values)
radar.longitude['data'] = np.array(ds[lon].values)
radar.altitude['data'] = np.array(ds[alt].values)
# Add sweep information
radar.sweep_number['data'] = swp
radar.sweep_start_ray_index['data'] = sweep_start_index
radar.sweep_end_ray_index['data'] = sweep_end_index
radar.sweep_mode['data'] = np.array(sweep_mode)
radar.scan_type = sweep_mode
# Add elevation, azimuth, etc...
radar.azimuth['data'] = np.array(ds[azimuth])
radar.elevation['data'] = np.array(ds[elevation])
radar.fixed_angle['data'] = np.array(ds[elevation].values[0])
radar.range['data'] = np.array(ds[range_var].values)
# Calculate radar points in lat/lon
radar.init_gate_altitude()
radar.init_gate_longitude_latitude()
# Add the fields to the radar object
fields = {}
for v in variables:
ref_dict = pyart.config.get_metadata(v)
ref_dict['data'] = np.array(ds[v].values)
fields[v] = ref_dict
radar.fields = fields
return radar
def convert_to_potential_temp(
ds=None,
temp_var_name=None,
press_var_name=None,
temperature=None,
pressure=None,
temp_var_units=None,
press_var_units=None,
):
"""
Converts temperature to potential temperature.
Parameters
----------
ds : xarray.DataSet
ACT Xarray dataset
temp_var_name : str
Temperature variable name in the ACT Xarray dataset containing
temperature data to convert.
press_var_name : str
Pressure variable name in the ACT Xarray dataset containing the
pressure data to use in conversion. If not set or set to None will
use values from pressure keyword.
pressure : int, float, numpy array
Optional pressure values to use instead of using values from xarray
dataset. If set must also set press_var_units keyword.
temp_var_units : string
Pint recognized units string for temperature data. If set to None will
use the units attribute under temperature variable in ds.
press_var_units : string
Pint recognized units string for pressure data. If set to None will
use the units attribute under pressure variable in the dataset. If using
the pressure keyword this must be set.
Returns
-------
potential_temperature : None, int, float, numpy array
The converted temperature to potential temperature or None if something
goes wrong.
References
----------
May, R. M., Arms, S. C., Marsh, P., Bruning, E., Leeman, J. R., Goebbert,
K., Thielen, J. E., and Bruick, Z., 2021: MetPy: A Python Package for
Meteorological Data. Unidata, https://github.com/Unidata/MetPy,
doi:10.5065/D6WW7G29.
"""
potential_temp = None
if temp_var_units is None and temp_var_name is not None:
temp_var_units = ds[temp_var_name].attrs['units']
if press_var_units is None and press_var_name is not None:
press_var_units = ds[press_var_name].attrs['units']
if press_var_units is None:
raise ValueError(
"Need to provide 'press_var_units' keyword " "when using 'pressure' keyword"
)
if temp_var_units is None:
raise ValueError(
"Need to provide 'temp_var_units' keyword " "when using 'temperature' keyword"
)
if temperature is not None:
temperature = metpy.units.units.Quantity(temperature, temp_var_units)
else:
temperature = metpy.units.units.Quantity(ds[temp_var_name].values, temp_var_units)
if pressure is not None:
pressure = metpy.units.units.Quantity(pressure, press_var_units)
else:
pressure = metpy.units.units.Quantity(ds[press_var_name].values, press_var_units)
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=RuntimeWarning)
potential_temp = metpy.calc.potential_temperature(pressure, temperature)
potential_temp = potential_temp.to(temp_var_units).magnitude
return potential_temp
def height_adjusted_temperature(
ds=None,
temp_var_name=None,
height_difference=0,
height_units='m',
press_var_name=None,
temperature=None,
temp_var_units=None,
pressure=101.325,
press_var_units='kPa',
):
"""
Converts temperature for change in height.
Parameters
----------
ds : xarray.DataSet, None
Optional Xarray dataset for retrieving pressure and temperature values.
Not needed if using temperature keyword.
temp_var_name : str, None
Optional temperature variable name in the Xarray dataset containing the
temperature data to use in conversion. If not set or set to None will
use values from temperature keyword.
height_difference : int, float
Required difference in height to adjust pressure values. Positive
        values to increase height, negative values to decrease height.
height_units : str
Units of height value.
press_var_name : str, None
Optional pressure variable name in the Xarray dataset containing the
pressure data to use in conversion. If not set or set to None will
use values from pressure keyword.
temperature : int, float, numpy array, None
Optional temperature values to use instead of values in the dataset.
temp_var_units : str, None
Pint recognized units string for temperature data. If set to None will
use the units attribute under temperature variable in the dataset.
If using the temperature keyword this must be set.
pressure : int, float, numpy array, None
Optional pressure values to use instead of values in the dataset.
Default value of sea level pressure is set for ease of use.
press_var_units : str, None
Pint recognized units string for pressure data. If set to None will
use the units attribute under pressure variable in the dataset.
If using the pressure keyword this must be set. Default value of
sea level pressure is set for ease of use.
Returns
-------
adjusted_temperature : None, int, float, numpy array
The height adjusted temperature or None if something goes wrong.
References
----------
May, R. M., Arms, S. C., Marsh, P., Bruning, E., Leeman, J. R., Goebbert,
K., Thielen, J. E., and Bruick, Z., 2021: MetPy: A Python Package for
Meteorological Data. Unidata, https://github.com/Unidata/MetPy,
doi:10.5065/D6WW7G29.
"""
adjusted_temperature = None
if temp_var_units is None and temperature is None:
temp_var_units = ds[temp_var_name].attrs['units']
if temp_var_units is None:
raise ValueError(
"Need to provide 'temp_var_units' keyword when " 'providing temperature keyword values.'
)
if temperature is not None:
temperature = metpy.units.units.Quantity(temperature, temp_var_units)
else:
temperature = metpy.units.units.Quantity(ds[temp_var_name].values, temp_var_units)
if press_var_name is not None:
pressure = metpy.units.units.Quantity(ds[press_var_name].values, press_var_units)
else:
pressure = metpy.units.units.Quantity(pressure, press_var_units)
adjusted_pressure = height_adjusted_pressure(
height_difference=height_difference,
height_units=height_units,
pressure=pressure.magnitude,
press_var_units=press_var_units,
)
adjusted_pressure = metpy.units.units.Quantity(adjusted_pressure, press_var_units)
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=RuntimeWarning)
adjusted_temperature = metpy.calc.dry_lapse(adjusted_pressure, temperature, pressure)
adjusted_temperature = adjusted_temperature.to(temp_var_units).magnitude
return adjusted_temperature
def height_adjusted_pressure(
ds=None,
press_var_name=None,
height_difference=0,
height_units='m',
pressure=None,
press_var_units=None,
):
"""
Converts pressure for change in height.
Parameters
----------
ds : xarray.DataSet, None
Optional Xarray dataset for retrieving pressure values. Not needed if
using pressure keyword.
press_var_name : str, None
Optional pressure variable name in the Xarray dataset containing the
pressure data to use in conversion. If not set or set to None will
use values from pressure keyword.
height_difference : int, float
Required difference in height to adjust pressure values. Positive
        values to increase height, negative values to decrease height.
height_units : str
Units of height value.
pressure : int, float, numpy array, None
Optional pressure values to use instead of values in the dataset.
press_var_units : str, None
Pint recognized units string for pressure data. If set to None will
use the units attribute under pressure variable in the dataset.
If using the pressure keyword this must be set.
Returns
-------
adjusted_pressure : None, int, float, numpy array
The height adjusted pressure or None if something goes wrong.
References
----------
May, R. M., Arms, S. C., Marsh, P., Bruning, E., Leeman, J. R., Goebbert,
K., Thielen, J. E., and Bruick, Z., 2021: MetPy: A Python Package for
Meteorological Data. Unidata, https://github.com/Unidata/MetPy,
doi:10.5065/D6WW7G29.
"""
adjusted_pressure = None
if press_var_units is None and pressure is None:
press_var_units = ds[press_var_name].attrs['units']
if press_var_units is None:
raise ValueError(
"Need to provide 'press_var_units' keyword when " 'providing pressure keyword values.'
)
if pressure is not None:
pressure = metpy.units.units.Quantity(pressure, press_var_units)
else:
pressure = metpy.units.units.Quantity(ds[press_var_name].values, press_var_units)
height_difference = metpy.units.units.Quantity(height_difference, height_units)
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=RuntimeWarning)
adjusted_pressure = metpy.calc.add_height_to_pressure(pressure, height_difference)
adjusted_pressure = adjusted_pressure.to(press_var_units).magnitude
return adjusted_pressure
def arm_site_location_search(site_code='sgp', facility_code=None):
"""
Parameters
----------
site_code : str
ARM site code to retrieve facilities and coordinate information. Example and default
is 'sgp'.
facility_code : str or None
Facility code or codes for the ARM site provided. If None is provided, all facilities are returned.
Example string for multiple facilities is 'A4,I5'.
Returns
-------
coord_dict : dict
A dictionary containing the facility chosen coordinate information or all facilities
if None for facility_code and their respective coordinates.
"""
headers = {
'Content-Type': 'application/json',
}
# Return all facilities if facility_code is None else set the query to include
# facility search
if facility_code is None:
query = "site_code:" + site_code
else:
query = "site_code:" + site_code + " AND facility_code:" + facility_code
# Search aggregation for elastic search
json_data = {
"aggs": {
"distinct_facility_code": {
"terms": {
"field": "facility_code.keyword",
"order": {
"_key": "asc"
},
"size": 7000,
},
"aggs": {
"hits": {
"top_hits": {
"_source": [
"site_type",
"site_code",
"facility_code",
"location",
],
"size": 1
},
},
},
},
},
"size": 0,
"query": {
"query_string": {
"query": query,
},
},
}
# Uses requests to grab metadata from arm.gov.
response = requests.get('https://adc.arm.gov/elastic/metadata/_search', headers=headers, json=json_data)
# Loads the text to a dictionary
response_dict = json.loads(response.text)
# Searches dictionary for the site, facility and coordinate information.
coord_dict = {}
# Loop through each facility.
for i in range(len(response_dict['aggregations']['distinct_facility_code']['buckets'])):
site_info = response_dict['aggregations']['distinct_facility_code']['buckets'][i]['hits']['hits']['hits'][0]['_source']
site = site_info['site_code']
facility = site_info['facility_code']
# Some sites do not contain coordinate information, return None if that is the case.
if site_info['location'] is None:
coords = {'latitude': None,
'longitude': None}
else:
lat, lon = site_info['location'].split(',')
lat = float(lat)
lon = float(lon)
coords = {'latitude': lat,
'longitude': lon}
coord_dict.setdefault(site + ' ' + facility, coords)
return coord_dict
<|code_end|>
examples/plotting/plot_scatter.py
<|code_start|>"""
Compare Aircraft Airspeeds
--------------------------
Compare Aircraft Airspeeds via the DistributionDisplay
Scatter Plot
Written: Joe O'Brien
"""
from arm_test_data import DATASETS
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats.mstats import pearsonr
import act
from act.io.icartt import read_icartt
# Call the read_icartt function, which supports input
# for ICARTT (v2.0) formatted files.
# Example file is ARM Aerial Facility Navigation Data
filename_icartt = DATASETS.fetch('AAFNAV_COR_20181104_R0.ict')
ds = read_icartt(filename_icartt)
# Create a DistributionDisplay object to compare fields
display = act.plotting.DistributionDisplay(ds)
# Compare aircraft true airspeed with ground speed
display.plot_scatter('true_airspeed',
'ground_speed',
m_field='ambient_temp',
marker='x',
cbar_label='Ambient Temperature ($^\circ$C)' # noqa W605
)
# Set the range of the field on the x-axis
display.set_xrng((40, 140))
display.set_yrng((40, 140))
# Determine the best fit line
z = np.ma.polyfit(ds['true_airspeed'],
ds['ground_speed'],
1
)
p = np.poly1d(z)
# Plot the best fit line
display.axes[0].plot(ds['true_airspeed'],
p(ds['true_airspeed']),
'r',
linewidth=2
)
# Display the line equation
display.axes[0].text(45,
135,
"y = %.3fx + (%.3f)" % (z[0], z[1]),
color='r',
fontsize=12
)
# Calculate Pearson Correlation Coefficient
cc_conc = pearsonr(ds['true_airspeed'],
ds['ground_speed']
)
# Display the Pearson CC
display.axes[0].text(45,
130,
"Pearson CC: %.2f" % (cc_conc[0]),
fontsize=12
)
# Display the total number of samples
display.axes[0].text(45,
125,
"N = %.0f" % (ds['true_airspeed'].data.shape[0]),
fontsize=12
)
# Display the 1:1 ratio line
display.set_ratio_line()
plt.show()
<|code_end|>
|
Bug in xsection plot map code
* ACT version: Current Version
* Python version: All
* Operating System: All
### Description
The xsection plot map is generating images with duplicate axes (see image below). I believe this is probably the cause of our baseline image failure.

|
act/plotting/xsectiondisplay.py
<|code_start|>"""
Stores the class for XSectionDisplay.
"""
# Import third party libraries
import matplotlib.pyplot as plt
import numpy as np
try:
import cartopy.crs as ccrs
CARTOPY_AVAILABLE = True
except ImportError:
CARTOPY_AVAILABLE = False
# Import Local Libs
from ..utils import data_utils
from .plot import Display
class XSectionDisplay(Display):
"""
Plots cross sections of multidimensional datasets. The data
must be able to be sliced into a 2 dimensional slice using the
xarray :func:`xarray.Dataset.sel` and :func:`xarray.Dataset.isel` commands.
This is inherited from the :func:`act.plotting.Display`
    class and therefore has the same attributes as that class.
See :func:`act.plotting.Display`
for more information. There are no additional attributes or parameters
to this class.
In order to create geographic plots, ACT needs the Cartopy package to be
    installed on your system. More information about Cartopy can be found here:
    https://scitools.org.uk/cartopy/docs/latest/.
Examples
--------
For example, if you only want to do a cross section through the first
time period of a 3D dataset called :code:`ir_temperature`, you would
do the following in xarray:
.. code-block:: python
time_slice = my_ds["ir_temperature"].isel(time=0)
The methods of this class support passing in keyword arguments into
xarray :func:`xarray.Dataset.sel` and :func:`xarray.Dataset.isel` commands
so that new datasets do not need to be created when slicing by specific time
periods or spatial slices. For example, to plot the first time period
from :code:`my_ds`, simply do:
.. code-block:: python
xsection = XSectionDisplay(my_ds, figsize=(15, 8))
xsection.plot_xsection_map(
None,
"ir_temperature",
vmin=220,
vmax=300,
cmap="Greys",
x="longitude",
y="latitude",
isel_kwargs={"time": 0},
)
Here, the array is sliced by the first time period as specified
in :code:`isel_kwargs`. The other keyword arguments are standard keyword
arguments taken by :func:`matplotlib.pyplot.pcolormesh`.
"""
def __init__(self, ds, subplot_shape=(1,), ds_name=None, **kwargs):
super().__init__(ds, subplot_shape, ds_name, **kwargs)
def set_subplot_to_map(self, subplot_index):
total_num_plots = self.axes.shape
if len(total_num_plots) == 2:
second_number = total_num_plots[0]
j = subplot_index[1]
else:
second_number = 1
j = 0
third_number = second_number * subplot_index[0] + j + 1
self.axes[subplot_index] = plt.subplot(
total_num_plots[0],
second_number,
third_number,
projection=ccrs.PlateCarree(),
)
def set_xrng(self, xrng, subplot_index=(0,)):
"""
Sets the x range of the plot.
Parameters
----------
xrng : 2 number array
The x limits of the plot.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
"""
if self.axes is None:
raise RuntimeError('set_xrng requires the plot to be displayed.')
if not hasattr(self, 'xrng') and len(self.axes.shape) == 2:
self.xrng = np.zeros((self.axes.shape[0], self.axes.shape[1], 2), dtype=xrng[0].dtype)
elif not hasattr(self, 'xrng') and len(self.axes.shape) == 1:
self.xrng = np.zeros((self.axes.shape[0], 2), dtype=xrng[0].dtype)
self.axes[subplot_index].set_xlim(xrng)
self.xrng[subplot_index, :] = np.array(xrng)
def set_yrng(self, yrng, subplot_index=(0,)):
"""
Sets the y range of the plot.
Parameters
----------
yrng : 2 number array
The y limits of the plot.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
"""
if self.axes is None:
raise RuntimeError('set_yrng requires the plot to be displayed.')
if not hasattr(self, 'yrng') and len(self.axes.shape) == 2:
self.yrng = np.zeros((self.axes.shape[0], self.axes.shape[1], 2), dtype=yrng[0].dtype)
elif not hasattr(self, 'yrng') and len(self.axes.shape) == 1:
self.yrng = np.zeros((self.axes.shape[0], 2), dtype=yrng[0].dtype)
if yrng[0] == yrng[1]:
yrng[1] = yrng[1] + 1
self.axes[subplot_index].set_ylim(yrng)
self.yrng[subplot_index, :] = yrng
def plot_xsection(
self,
dsname,
varname,
x=None,
y=None,
subplot_index=(0,),
sel_kwargs=None,
isel_kwargs=None,
**kwargs,
):
"""
This function plots a cross section whose x and y coordinates are
specified by the variable names either provided by the user or
automatically detected by xarray.
Parameters
----------
dsname : str or None
The name of the datastream to plot from. Set to None to have
ACT attempt to automatically detect this.
varname : str
The name of the variable to plot.
x : str or None
The name of the x coordinate variable.
y : str or None
The name of the y coordinate variable.
subplot_index : tuple
The index of the subplot to create the plot in.
sel_kwargs : dict
The keyword arguments to pass into :py:func:`xarray.DataArray.sel`
This is useful when your data is in 3 or more dimensions and you
want to only view a cross section on a specific x-y plane. For more
information on how to use xarray's .sel and .isel functionality
to slice datasets, see the documentation on :func:`xarray.DataArray.sel`.
isel_kwargs : dict
            The keyword arguments to pass into :py:func:`xarray.DataArray.isel`.
**kwargs : keyword arguments
Additional keyword arguments will be passed into
:func:`xarray.DataArray.plot`.
Returns
-------
ax : matplotlib axis handle
The matplotlib axis handle corresponding to the plot.
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
                'or more datasets in the XSectionDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
temp_ds = self._ds[dsname].copy()
if sel_kwargs is not None:
temp_ds = temp_ds.sel(**sel_kwargs, method='nearest')
if isel_kwargs is not None:
temp_ds = temp_ds.isel(**isel_kwargs)
        if (x is not None and y is None) or (y is not None and x is None):
            raise RuntimeError(
                'Both x and y must be specified if we are '
                + 'not trying to automatically detect them!'
)
if x is not None:
coord_list = {}
x_coord_dim = temp_ds[x].dims[0]
coord_list[x] = x_coord_dim
y_coord_dim = temp_ds[y].dims[0]
coord_list[y] = y_coord_dim
new_ds = data_utils.assign_coordinates(temp_ds, coord_list)
my_dataarray = new_ds[varname]
else:
my_dataarray = temp_ds[varname]
coord_keys = [key for key in my_dataarray.coords.keys()]
# X-array will sometimes shorten latitude and longitude variables
if x == 'longitude' and x not in coord_keys:
xc = 'lon'
else:
xc = x
if y == 'latitude' and y not in coord_keys:
yc = 'lat'
else:
yc = y
if x is None:
ax = my_dataarray.plot(ax=self.axes[subplot_index], **kwargs)
else:
ax = my_dataarray.plot(ax=self.axes[subplot_index], x=xc, y=yc, **kwargs)
the_coords = [the_keys for the_keys in my_dataarray.coords.keys()]
if x is None:
x = the_coords[0]
else:
x = coord_list[x]
if y is None:
y = the_coords[1]
else:
y = coord_list[y]
xrng = self.axes[subplot_index].get_xlim()
self.set_xrng(xrng, subplot_index)
yrng = self.axes[subplot_index].get_ylim()
self.set_yrng(yrng, subplot_index)
del temp_ds
return ax
def plot_xsection_map(
self, dsname, varname, subplot_index=(0,), coastlines=True, background=False, **kwargs
):
"""
Plots a cross section of 2D data on a geographical map.
Parameters
----------
dsname : str or None
The name of the datastream to plot from. Set to None
to have ACT attempt to automatically detect this.
varname : str
The name of the variable to plot.
subplot_index : tuple
The index of the subplot to plot inside.
coastlines : bool
Set to True to plot the coastlines.
background : bool
Set to True to plot a stock image background.
**kwargs : keyword arguments
Additional keyword arguments will be passed into
:func:`act.plotting.XSectionDisplay.plot_xsection`
Returns
-------
ax : matplotlib axis handle
The matplotlib axis handle corresponding to the plot.
"""
if not CARTOPY_AVAILABLE:
raise ImportError(
'Cartopy needs to be installed in order to plot ' + 'cross sections on maps!'
)
self.set_subplot_to_map(subplot_index)
self.plot_xsection(dsname, varname, subplot_index=subplot_index, **kwargs)
xlims = self.xrng[subplot_index].flatten()
ylims = self.yrng[subplot_index].flatten()
self.axes[subplot_index].set_xticks(np.linspace(round(xlims[0], 0), round(xlims[1], 0), 10))
self.axes[subplot_index].set_yticks(np.linspace(round(ylims[0], 0), round(ylims[1], 0), 10))
if coastlines:
self.axes[subplot_index].coastlines(resolution='10m')
if background:
self.axes[subplot_index].stock_img()
return self.axes[subplot_index]
<|code_end|>
|
act/plotting/xsectiondisplay.py
<|code_start|>"""
Stores the class for XSectionDisplay.
"""
# Import third party libraries
import matplotlib.pyplot as plt
import numpy as np
try:
import cartopy.crs as ccrs
CARTOPY_AVAILABLE = True
except ImportError:
CARTOPY_AVAILABLE = False
# Import Local Libs
from ..utils import data_utils
from .plot import Display
class XSectionDisplay(Display):
"""
Plots cross sections of multidimensional datasets. The data
must be able to be sliced into a 2 dimensional slice using the
xarray :func:`xarray.Dataset.sel` and :func:`xarray.Dataset.isel` commands.
This is inherited from the :func:`act.plotting.Display`
    class and therefore has the same attributes as that class.
See :func:`act.plotting.Display`
for more information. There are no additional attributes or parameters
to this class.
In order to create geographic plots, ACT needs the Cartopy package to be
    installed on your system. More information about Cartopy can be found here:
    https://scitools.org.uk/cartopy/docs/latest/.
Examples
--------
For example, if you only want to do a cross section through the first
time period of a 3D dataset called :code:`ir_temperature`, you would
do the following in xarray:
.. code-block:: python
time_slice = my_ds["ir_temperature"].isel(time=0)
The methods of this class support passing in keyword arguments into
xarray :func:`xarray.Dataset.sel` and :func:`xarray.Dataset.isel` commands
so that new datasets do not need to be created when slicing by specific time
periods or spatial slices. For example, to plot the first time period
from :code:`my_ds`, simply do:
.. code-block:: python
xsection = XSectionDisplay(my_ds, figsize=(15, 8))
xsection.plot_xsection_map(
None,
"ir_temperature",
vmin=220,
vmax=300,
cmap="Greys",
x="longitude",
y="latitude",
isel_kwargs={"time": 0},
)
Here, the array is sliced by the first time period as specified
in :code:`isel_kwargs`. The other keyword arguments are standard keyword
arguments taken by :func:`matplotlib.pyplot.pcolormesh`.
"""
def __init__(self, ds, subplot_shape=(1,), ds_name=None, **kwargs):
super().__init__(ds, subplot_shape, ds_name, **kwargs)
def set_subplot_to_map(self, subplot_index):
self.fig.delaxes(self.axes[subplot_index])
total_num_plots = self.axes.shape
if len(total_num_plots) == 2:
second_number = total_num_plots[0]
j = subplot_index[1]
else:
second_number = 1
j = 0
third_number = second_number * subplot_index[0] + j + 1
self.axes[subplot_index] = plt.subplot(
total_num_plots[0],
second_number,
third_number,
projection=ccrs.PlateCarree(),
)
def set_xrng(self, xrng, subplot_index=(0,)):
"""
Sets the x range of the plot.
Parameters
----------
xrng : 2 number array
The x limits of the plot.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
"""
if self.axes is None:
raise RuntimeError('set_xrng requires the plot to be displayed.')
if not hasattr(self, 'xrng') and len(self.axes.shape) == 2:
self.xrng = np.zeros((self.axes.shape[0], self.axes.shape[1], 2), dtype=xrng[0].dtype)
elif not hasattr(self, 'xrng') and len(self.axes.shape) == 1:
self.xrng = np.zeros((self.axes.shape[0], 2), dtype=xrng[0].dtype)
self.axes[subplot_index].set_xlim(xrng)
self.xrng[subplot_index, :] = np.array(xrng)
def set_yrng(self, yrng, subplot_index=(0,)):
"""
Sets the y range of the plot.
Parameters
----------
yrng : 2 number array
The y limits of the plot.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
"""
if self.axes is None:
raise RuntimeError('set_yrng requires the plot to be displayed.')
if not hasattr(self, 'yrng') and len(self.axes.shape) == 2:
self.yrng = np.zeros((self.axes.shape[0], self.axes.shape[1], 2), dtype=yrng[0].dtype)
elif not hasattr(self, 'yrng') and len(self.axes.shape) == 1:
self.yrng = np.zeros((self.axes.shape[0], 2), dtype=yrng[0].dtype)
if yrng[0] == yrng[1]:
yrng[1] = yrng[1] + 1
self.axes[subplot_index].set_ylim(yrng)
self.yrng[subplot_index, :] = yrng
def plot_xsection(
self,
dsname,
varname,
x=None,
y=None,
subplot_index=(0,),
sel_kwargs=None,
isel_kwargs=None,
**kwargs,
):
"""
This function plots a cross section whose x and y coordinates are
specified by the variable names either provided by the user or
automatically detected by xarray.
Parameters
----------
dsname : str or None
The name of the datastream to plot from. Set to None to have
ACT attempt to automatically detect this.
varname : str
The name of the variable to plot.
x : str or None
The name of the x coordinate variable.
y : str or None
The name of the y coordinate variable.
subplot_index : tuple
The index of the subplot to create the plot in.
sel_kwargs : dict
The keyword arguments to pass into :py:func:`xarray.DataArray.sel`
This is useful when your data is in 3 or more dimensions and you
want to only view a cross section on a specific x-y plane. For more
information on how to use xarray's .sel and .isel functionality
to slice datasets, see the documentation on :func:`xarray.DataArray.sel`.
isel_kwargs : dict
            The keyword arguments to pass into :py:func:`xarray.DataArray.isel`.
**kwargs : keyword arguments
Additional keyword arguments will be passed into
:func:`xarray.DataArray.plot`.
Returns
-------
ax : matplotlib axis handle
The matplotlib axis handle corresponding to the plot.
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
                'or more datasets in the XSectionDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
temp_ds = self._ds[dsname].copy()
if sel_kwargs is not None:
temp_ds = temp_ds.sel(**sel_kwargs, method='nearest')
if isel_kwargs is not None:
temp_ds = temp_ds.isel(**isel_kwargs)
        if (x is not None and y is None) or (y is not None and x is None):
            raise RuntimeError(
                'Both x and y must be specified if we are '
                + 'not trying to automatically detect them!'
)
if x is not None:
coord_list = {}
x_coord_dim = temp_ds[x].dims[0]
coord_list[x] = x_coord_dim
y_coord_dim = temp_ds[y].dims[0]
coord_list[y] = y_coord_dim
new_ds = data_utils.assign_coordinates(temp_ds, coord_list)
my_dataarray = new_ds[varname]
else:
my_dataarray = temp_ds[varname]
coord_keys = [key for key in my_dataarray.coords.keys()]
# X-array will sometimes shorten latitude and longitude variables
if x == 'longitude' and x not in coord_keys:
xc = 'lon'
else:
xc = x
if y == 'latitude' and y not in coord_keys:
yc = 'lat'
else:
yc = y
if x is None:
my_dataarray.plot(ax=self.axes[subplot_index], **kwargs)
else:
my_dataarray.plot(ax=self.axes[subplot_index], x=xc, y=yc, **kwargs)
the_coords = [the_keys for the_keys in my_dataarray.coords.keys()]
if x is None:
x = the_coords[0]
else:
x = coord_list[x]
if y is None:
y = the_coords[1]
else:
y = coord_list[y]
xrng = self.axes[subplot_index].get_xlim()
self.set_xrng(xrng, subplot_index)
yrng = self.axes[subplot_index].get_ylim()
self.set_yrng(yrng, subplot_index)
del temp_ds
return self.axes[subplot_index]
def plot_xsection_map(
self, dsname, varname, subplot_index=(0,), coastlines=True, background=False, **kwargs
):
"""
Plots a cross section of 2D data on a geographical map.
Parameters
----------
dsname : str or None
The name of the datastream to plot from. Set to None
to have ACT attempt to automatically detect this.
varname : str
The name of the variable to plot.
subplot_index : tuple
The index of the subplot to plot inside.
coastlines : bool
Set to True to plot the coastlines.
background : bool
Set to True to plot a stock image background.
**kwargs : keyword arguments
Additional keyword arguments will be passed into
:func:`act.plotting.XSectionDisplay.plot_xsection`
Returns
-------
ax : matplotlib axis handle
The matplotlib axis handle corresponding to the plot.
"""
if not CARTOPY_AVAILABLE:
raise ImportError(
'Cartopy needs to be installed in order to plot ' + 'cross sections on maps!'
)
self.set_subplot_to_map(subplot_index)
self.plot_xsection(dsname, varname, subplot_index=subplot_index, **kwargs)
xlims = self.xrng[subplot_index].flatten()
ylims = self.yrng[subplot_index].flatten()
self.axes[subplot_index].set_xticks(np.linspace(round(xlims[0], 0), round(xlims[1], 0), 10))
self.axes[subplot_index].set_yticks(np.linspace(round(ylims[0], 0), round(ylims[1], 0), 10))
if coastlines:
self.axes[subplot_index].coastlines(resolution='10m')
if background:
self.axes[subplot_index].stock_img()
return self.axes[subplot_index]
<|code_end|>
|
Add a PieChartDisplay to Support the Aerosol Community
Pie charts are helpful when working with aerosol data. We should have this functionality in ACT.
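A rough sketch of what such a plotting routine could look like (hypothetical only; the method name, signature, and field names are placeholders, not an existing ACT API). It simply averages the requested fields over time and hands the fractions to matplotlib's `Axes.pie`:
```python
import matplotlib.pyplot as plt
import numpy as np


def plot_pie_chart(ds, fields, labels=None, ax=None, **kwargs):
    """Plot time-averaged values of the given xarray.Dataset fields as a pie chart."""
    # Average each field over time; pie slices must be non-negative.
    values = np.array([np.nanmean(ds[f].values) for f in fields])
    values = np.clip(values, 0, None)
    if labels is None:
        labels = fields
    if ax is None:
        _, ax = plt.subplots()
    ax.pie(values, labels=labels, autopct='%1.1f%%', **kwargs)
    ax.set_aspect('equal')
    return ax
```
In practice a method like this would likely live on `DistributionDisplay` alongside the other plotting methods so it can reuse the existing subplot handling.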
|
act/plotting/distributiondisplay.py
<|code_start|>""" Module for Distribution Plotting. """
import matplotlib.pyplot as plt
import numpy as np
import xarray as xr
import pandas as pd
from ..utils import datetime_utils as dt_utils
from .plot import Display
class DistributionDisplay(Display):
"""
This class is used to make distribution related plots. It is inherited from Display
and therefore contains all of Display's attributes and methods.
Examples
--------
To create a DistributionDisplay with 3 rows, simply do:
.. code-block:: python
ds = act.io.read_arm_netcdf(the_file)
        disp = act.plotting.DistributionDisplay(ds, subplot_shape=(3,), figsize=(15, 5))
The DistributionDisplay constructor takes in the same keyword arguments as
plt.subplots. For more information on the plt.subplots keyword arguments,
see the `matplotlib documentation
<https://matplotlib.org/api/_as_gen/matplotlib.pyplot.subplots.html>`_.
If no subplot_shape is provided, then no figure or axis will be created
until add_subplots or plots is called.
"""
def __init__(self, ds, subplot_shape=(1,), ds_name=None, **kwargs):
super().__init__(ds, subplot_shape, ds_name, **kwargs)
def set_xrng(self, xrng, subplot_index=(0,)):
"""
Sets the x range of the plot.
Parameters
----------
xrng : 2 number array
The x limits of the plot.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
"""
if self.axes is None:
raise RuntimeError('set_xrng requires the plot to be displayed.')
if not hasattr(self, 'xrng') and len(self.axes.shape) == 2:
self.xrng = np.zeros((self.axes.shape[0], self.axes.shape[1], 2), dtype='datetime64[D]')
elif not hasattr(self, 'xrng') and len(self.axes.shape) == 1:
self.xrng = np.zeros((self.axes.shape[0], 2), dtype='datetime64[D]')
self.axes[subplot_index].set_xlim(xrng)
self.xrng[subplot_index, :] = np.array(xrng)
def set_yrng(self, yrng, subplot_index=(0,)):
"""
Sets the y range of the plot.
Parameters
----------
yrng : 2 number array
The y limits of the plot.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
"""
if self.axes is None:
raise RuntimeError('set_yrng requires the plot to be displayed.')
if not hasattr(self, 'yrng') and len(self.axes.shape) == 2:
self.yrng = np.zeros((self.axes.shape[0], self.axes.shape[1], 2))
elif not hasattr(self, 'yrng') and len(self.axes.shape) == 1:
self.yrng = np.zeros((self.axes.shape[0], 2))
if yrng[0] == yrng[1]:
yrng[1] = yrng[1] + 1
self.axes[subplot_index].set_ylim(yrng)
self.yrng[subplot_index, :] = yrng
def _get_data(self, dsname, fields):
if isinstance(fields, str):
fields = [fields]
return self._ds[dsname][fields].dropna('time')
def plot_stacked_bar(
self,
field,
dsname=None,
bins=10,
sortby_field=None,
sortby_bins=None,
subplot_index=(0,),
set_title=None,
density=False,
hist_kwargs=dict(),
**kwargs,
):
"""
This procedure will plot a stacked bar graph of a histogram.
Parameters
----------
field : str
The name of the field to take the histogram of.
dsname : str or None
The name of the datastream the field is contained in. Set
to None to let ACT automatically determine this.
bins : array-like or int
The histogram bin boundaries to use. If not specified, numpy's
default 10 is used.
sortby_field : str or None
Set this option to a field name in order to sort the histograms
by a given field parameter. For example, one can sort histograms of CO2
concentration by temperature.
sortby_bins : array-like or None
The bins to sort the histograms by.
subplot_index : tuple
The subplot index to place the plot in
set_title : str
The title of the plot.
density : bool
Set to True to plot a p.d.f. instead of a frequency histogram.
hist_kwargs : dict
Additional keyword arguments to pass to numpy histogram.
Other keyword arguments will be passed into :func:`matplotlib.pyplot.bar`.
Returns
-------
return_dict : dict
A dictionary containing the plot axis handle, bin boundaries, and
generated histogram.
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
                + 'or more datasets in the DistributionDisplay '
+ 'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
if sortby_field is not None:
ds = self._get_data(dsname, [field, sortby_field])
xdata, ydata = ds[field], ds[sortby_field]
else:
xdata = self._get_data(dsname, field)[field]
if 'units' in xdata.attrs:
xtitle = ''.join(['(', xdata.attrs['units'], ')'])
else:
xtitle = field
if sortby_bins is None and sortby_field is not None:
            # We will default the y direction to have the same number of bins as x
if isinstance(bins, int):
n_bins = bins
else:
n_bins = len(bins)
sortby_bins = np.linspace(ydata.values.min(), ydata.values.max(), n_bins)
# Get the current plotting axis
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
if sortby_field is not None:
if 'units' in ydata.attrs:
ytitle = ''.join(['(', ydata.attrs['units'], ')'])
else:
ytitle = field
my_hist, x_bins, y_bins = np.histogram2d(
xdata.values.flatten(),
ydata.values.flatten(),
density=density,
bins=[bins, sortby_bins],
**hist_kwargs,
)
x_inds = (x_bins[:-1] + x_bins[1:]) / 2.0
self.axes[subplot_index].bar(
x_inds,
my_hist[:, 0].flatten(),
label=(str(y_bins[0]) + ' to ' + str(y_bins[1])),
**kwargs,
)
for i in range(1, len(y_bins) - 1):
self.axes[subplot_index].bar(
x_inds,
my_hist[:, i].flatten(),
bottom=my_hist[:, i - 1],
label=(str(y_bins[i]) + ' to ' + str(y_bins[i + 1])),
**kwargs,
)
self.axes[subplot_index].legend()
else:
my_hist, bins = np.histogram(
xdata.values.flatten(), bins=bins, density=density, **hist_kwargs
)
x_inds = (bins[:-1] + bins[1:]) / 2.0
self.axes[subplot_index].bar(x_inds, my_hist)
# Set Title
if set_title is None:
set_title = ' '.join(
[
dsname,
field,
'on',
dt_utils.numpy_to_arm_date(self._ds[dsname].time.values[0]),
]
)
self.axes[subplot_index].set_title(set_title)
self.axes[subplot_index].set_ylabel('count')
self.axes[subplot_index].set_xlabel(xtitle)
return_dict = {}
return_dict['plot_handle'] = self.axes[subplot_index]
if 'x_bins' in locals():
return_dict['x_bins'] = x_bins
return_dict['y_bins'] = y_bins
else:
return_dict['bins'] = bins
return_dict['histogram'] = my_hist
return return_dict
def plot_size_distribution(
self, field, bins, time=None, dsname=None, subplot_index=(0,), set_title=None, **kwargs
):
"""
This procedure plots a stairstep plot of a size distribution. This is
useful for plotting size distributions and waveforms.
Parameters
----------
field : str
The name of the field to plot the spectrum from.
bins : str or array-like
The name of the field that stores the bins for the spectra.
time : none or datetime
If None, spectra to plot will be automatically determined.
Otherwise, specify this field for the time period to plot.
dsname : str
The name of the Dataset to plot. Set to None to have
ACT automatically determine this.
subplot_index : tuple
The subplot index to place the plot in.
set_title : str or None
Use this to set the title.
Additional keyword arguments will be passed into :func:`matplotlib.pyplot.step`
Returns
-------
ax : matplotlib axis handle
The matplotlib axis handle referring to the plot.
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
                + 'or more datasets in the DistributionDisplay '
+ 'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
xdata = self._get_data(dsname, field)[field]
if isinstance(bins, str):
bins = self._ds[dsname][bins]
else:
bins = xr.DataArray(bins)
if 'units' in bins.attrs:
xtitle = ''.join(['(', bins.attrs['units'], ')'])
else:
xtitle = 'Bin #'
if 'units' in xdata.attrs:
ytitle = ''.join(['(', xdata.attrs['units'], ')'])
else:
ytitle = field
if len(xdata.dims) > 1 and time is None:
raise ValueError(
'Input data has more than one dimension, ' + 'you must specify a time to plot!'
)
elif len(xdata.dims) > 1:
xdata = xdata.sel(time=time, method='nearest')
if len(bins.dims) > 1 or len(bins.values) != len(xdata.values):
raise ValueError(
'Bins must be a one dimensional field whose '
+ 'length is equal to the field length!'
)
# Get the current plotting axis
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
# Set Title
if set_title is None:
set_title = ' '.join(
[
dsname,
field,
'on',
dt_utils.numpy_to_arm_date(self._ds[dsname].time.values[0]),
]
)
if time is not None:
t = pd.Timestamp(time)
set_title += ''.join(
[' at ', ':'.join([str(t.hour), str(t.minute), str(t.second)])]
)
self.axes[subplot_index].set_title(set_title)
self.axes[subplot_index].step(bins.values, xdata.values, **kwargs)
self.axes[subplot_index].set_xlabel(xtitle)
self.axes[subplot_index].set_ylabel(ytitle)
return self.axes[subplot_index]
def plot_stairstep(
self,
field,
dsname=None,
bins=10,
sortby_field=None,
sortby_bins=None,
subplot_index=(0,),
set_title=None,
density=False,
hist_kwargs=dict(),
**kwargs,
):
"""
This procedure will plot a stairstep plot of a histogram.
Parameters
----------
field : str
The name of the field to take the histogram of.
dsname : str or None
The name of the datastream the field is contained in. Set
to None to let ACT automatically determine this.
bins : array-like or int
The histogram bin boundaries to use. If not specified, numpy's
default 10 is used.
sortby_field : str or None
Set this option to a field name in order to sort the histograms
by a given field parameter. For example, one can sort histograms of CO2
concentration by temperature.
sortby_bins : array-like or None
The bins to sort the histograms by.
subplot_index : tuple
The subplot index to place the plot in.
set_title : str
The title of the plot.
density : bool
Set to True to plot a p.d.f. instead of a frequency histogram.
hist_kwargs : dict
Additional keyword arguments to pass to numpy histogram.
Other keyword arguments will be passed into :func:`matplotlib.pyplot.step`.
Returns
-------
return_dict : dict
A dictionary containing the plot axis handle, bin boundaries, and
generated histogram.
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
+ 'or more datasets in the TimeSeriesDisplay '
+ 'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
xdata = self._get_data(dsname, field)[field]
if 'units' in xdata.attrs:
xtitle = ''.join(['(', xdata.attrs['units'], ')'])
else:
xtitle = field
if sortby_field is not None:
ydata = self._ds[dsname][sortby_field]
if sortby_bins is None and sortby_field is not None:
if isinstance(bins, int):
n_bins = bins
else:
n_bins = len(bins)
# We will default the y direction to have the same # of bins as x
sortby_bins = np.linspace(ydata.values.min(), ydata.values.max(), n_bins)
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
if sortby_field is not None:
if 'units' in ydata.attrs:
ytitle = ''.join(['(', ydata.attrs['units'], ')'])
else:
ytitle = field
my_hist, x_bins, y_bins = np.histogram2d(
xdata.values.flatten(),
ydata.values.flatten(),
density=density,
bins=[bins, sortby_bins],
**hist_kwargs,
)
x_inds = (x_bins[:-1] + x_bins[1:]) / 2.0
self.axes[subplot_index].step(
x_inds,
my_hist[:, 0].flatten(),
label=(str(y_bins[0]) + ' to ' + str(y_bins[1])),
**kwargs,
)
for i in range(1, len(y_bins) - 1):
self.axes[subplot_index].step(
x_inds,
my_hist[:, i].flatten(),
label=(str(y_bins[i]) + ' to ' + str(y_bins[i + 1])),
**kwargs,
)
self.axes[subplot_index].legend()
else:
my_hist, bins = np.histogram(
xdata.values.flatten(), bins=bins, density=density, **hist_kwargs
)
x_inds = (bins[:-1] + bins[1:]) / 2.0
self.axes[subplot_index].step(x_inds, my_hist, **kwargs)
# Set Title
if set_title is None:
set_title = ' '.join(
[
dsname,
field,
'on',
dt_utils.numpy_to_arm_date(self._ds[dsname].time.values[0]),
]
)
self.axes[subplot_index].set_title(set_title)
self.axes[subplot_index].set_ylabel('count')
self.axes[subplot_index].set_xlabel(xtitle)
return_dict = {}
return_dict['plot_handle'] = self.axes[subplot_index]
if 'x_bins' in locals():
return_dict['x_bins'] = x_bins
return_dict['y_bins'] = y_bins
else:
return_dict['bins'] = bins
return_dict['histogram'] = my_hist
return return_dict
def plot_heatmap(
self,
x_field,
y_field,
dsname=None,
x_bins=None,
y_bins=None,
subplot_index=(0,),
set_title=None,
density=False,
set_shading='auto',
hist_kwargs=dict(),
threshold=None,
**kwargs,
):
"""
This procedure will plot a heatmap of a histogram from 2 variables.
Parameters
----------
x_field : str
The name of the field to take the histogram of on the X axis.
y_field : str
The name of the field to take the histogram of on the Y axis.
dsname : str or None
The name of the datastream the field is contained in. Set
to None to let ACT automatically determine this.
x_bins : array-like, int, or None
The histogram bin boundaries to use for the variable on the X axis.
Set to None to use numpy's default boundaries.
If an int, will indicate the number of bins to use
y_bins : array-like, int, or None
The histogram bin boundaries to use for the variable on the Y axis.
Set to None to use numpy's default boundaries.
If an int, will indicate the number of bins to use
subplot_index : tuple
The subplot index to place the plot in
set_title : str
The title of the plot.
density : bool
Set to True to plot a p.d.f. instead of a frequency histogram.
set_shading : string
Option to set the matplotlib.pcolormesh shading parameter.
Defaults to 'auto'.
threshold : float
Value on which to threshold the histogram results for plotting.
Setting to 0 will ensure that all 0 values are removed from the plot
making it easier to distinguish between 0 and low values
hist_kwargs : dict
Additional keyword arguments to pass to numpy histogram.
Other keyword arguments will be passed into :func:`matplotlib.pyplot.pcolormesh`.
Returns
-------
return_dict : dict
A dictionary containing the plot axis handle, bin boundaries, and
generated histogram.
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
ds = self._get_data(dsname, [x_field, y_field])
xdata, ydata = ds[x_field], ds[y_field]
if 'units' in xdata.attrs:
xtitle = ''.join(['(', xdata.attrs['units'], ')'])
else:
xtitle = x_field
if x_bins is not None and isinstance(x_bins, int):
x_bins = np.linspace(xdata.values.min(), xdata.values.max(), x_bins)
if y_bins is not None and isinstance(y_bins, int):
y_bins = np.linspace(ydata.values.min(), ydata.values.max(), y_bins)
if x_bins is not None and y_bins is None:
# We will default the y direction to have the same # of bins as x
y_bins = np.linspace(ydata.values.min(), ydata.values.max(), len(x_bins))
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
if 'units' in ydata.attrs:
ytitle = ''.join(['(', ydata.attrs['units'], ')'])
else:
ytitle = y_field
if x_bins is None:
my_hist, x_bins, y_bins = np.histogram2d(
xdata.values.flatten(), ydata.values.flatten(), density=density, **hist_kwargs
)
else:
my_hist, x_bins, y_bins = np.histogram2d(
xdata.values.flatten(),
ydata.values.flatten(),
density=density,
bins=[x_bins, y_bins],
**hist_kwargs,
)
# Adding in the ability to threshold the heatmaps
if threshold is not None:
my_hist[my_hist <= threshold] = np.nan
x_inds = (x_bins[:-1] + x_bins[1:]) / 2.0
y_inds = (y_bins[:-1] + y_bins[1:]) / 2.0
xi, yi = np.meshgrid(x_inds, y_inds, indexing='ij')
mesh = self.axes[subplot_index].pcolormesh(xi, yi, my_hist, shading=set_shading, **kwargs)
# Set Title
if set_title is None:
set_title = ' '.join(
[
dsname,
'on',
dt_utils.numpy_to_arm_date(self._ds[dsname].time.values[0]),
]
)
self.axes[subplot_index].set_title(set_title)
self.axes[subplot_index].set_ylabel(ytitle)
self.axes[subplot_index].set_xlabel(xtitle)
self.add_colorbar(mesh, title='count', subplot_index=subplot_index)
return_dict = {}
return_dict['plot_handle'] = self.axes[subplot_index]
return_dict['x_bins'] = x_bins
return_dict['y_bins'] = y_bins
return_dict['histogram'] = my_hist
return return_dict
def set_ratio_line(self, subplot_index=(0,)):
"""
Sets the 1:1 ratio line.
Parameters
----------
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
"""
if self.axes is None:
raise RuntimeError('set_ratio_line requires the plot to be displayed.')
# Define the xticks of the figure
xlims = self.axes[subplot_index].get_xticks()
ratio = np.linspace(xlims, xlims[-1])
self.axes[subplot_index].plot(ratio, ratio, 'k--')
def plot_scatter(
self,
x_field,
y_field,
m_field=None,
dsname=None,
cbar_label=None,
set_title=None,
subplot_index=(0,),
**kwargs,
):
"""
This procedure will produce a scatter plot from 2 variables.
Parameters
----------
x_field : str
The name of the field to display on the X axis.
y_field : str
The name of the field to display on the Y axis.
m_field : str
The name of the field to display on the markers.
cbar_label : str
The desired name to plot for the colorbar
set_title : str
The desired title for the plot.
Default title is created from the datastream.
dsname : str or None
The name of the datastream the field is contained in. Set
to None to let ACT automatically determine this.
subplot_index : tuple
The subplot index to place the plot in
Other keyword arguments will be passed into :func:`matplotlib.pyplot.scatter`.
Returns
-------
ax : matplotlib axis handle
The matplotlib axis handle of the plot
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
if m_field is None:
mdata = None
ds = self._get_data(dsname, [x_field, y_field])
xdata, ydata = ds[x_field], ds[y_field]
else:
ds = self._get_data(dsname, [x_field, y_field, m_field])
xdata, ydata, mdata = ds[x_field], ds[y_field], ds[m_field]
# Define the x-axis label. If units are available, plot.
if 'units' in xdata.attrs:
xtitle = x_field + ''.join([' (', xdata.attrs['units'], ')'])
else:
xtitle = x_field
# Define the y-axis label. If units are available, plot
if 'units' in ydata.attrs:
ytitle = y_field + ''.join([' (', ydata.attrs['units'], ')'])
else:
ytitle = y_field
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
# Define the axes for the figure
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
# Display the scatter plot, pass keyword args for unspecified attributes
scc = self.axes[subplot_index].scatter(xdata, ydata, c=mdata, **kwargs)
# Set Title
if set_title is None:
set_title = ' '.join(
[
dsname,
'on',
dt_utils.numpy_to_arm_date(self._ds[dsname].time.values[0]),
]
)
# Check to see if a colorbar label was set
if mdata is not None:
if cbar_label is None:
# Define the y-axis label. If units are available, plot
if 'units' in mdata.attrs:
ztitle = m_field + ''.join([' (', mdata.attrs['units'], ')'])
else:
ztitle = m_field
else:
ztitle = cbar_label
# Plot the colorbar
cbar = plt.colorbar(scc)
cbar.ax.set_ylabel(ztitle)
# Define the axes title, x-axis label, y-axis label
self.axes[subplot_index].set_title(set_title)
self.axes[subplot_index].set_ylabel(ytitle)
self.axes[subplot_index].set_xlabel(xtitle)
return self.axes[subplot_index]
def plot_violin(
self,
field,
positions=None,
dsname=None,
vert=True,
showmeans=True,
showmedians=True,
showextrema=True,
subplot_index=(0,),
set_title=None,
**kwargs,
):
"""
This procedure will produce a violin plot for the selected
field (or fields).
Parameters
----------
field : str or list
The name of the field (or fields) to display on the X axis.
positions : array-like, Default: None
The positions of the ticks along dependent axis.
dsname : str or None
The name of the datastream the field is contained in. Set
to None to let ACT automatically determine this.
vert : Boolean, Default: True
Display violin plot vertically. False will display it horizontally.
showmeans : Boolean; Default: True
If True, will display the mean of the datastream.
showmedians : Boolean; Default: True
If True, will display the median of the datastream.
showextrema : Boolean; Default: True
If True, will display the extremes of the datastream.
subplot_index : tuple
The subplot index to place the plot in
set_title : str
The title of the plot.
Other keyword arguments will be passed into :func:`matplotlib.pyplot.violinplot`.
Returns
-------
ax : matplotlib axis handle
The matplotlib axis handle of the plot
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
if dsname is None:
dsname = list(self._ds.keys())[0]
ds = self._get_data(dsname, field)
ndata = ds[field]
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
# Define the axes for the figure
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
# Define the axis label. If units are available, plot.
if 'units' in ndata.attrs:
axtitle = field + ''.join([' (', ndata.attrs['units'], ')'])
else:
axtitle = field
# Display the violin plot, pass keyword args for unspecified attributes
scc = self.axes[subplot_index].violinplot(
ndata,
positions=positions,
vert=vert,
showmeans=showmeans,
showmedians=showmedians,
showextrema=showextrema,
**kwargs,
)
if showmeans is True:
scc['cmeans'].set_edgecolor('red')
scc['cmeans'].set_label('mean')
if showmedians is True:
scc['cmedians'].set_edgecolor('black')
scc['cmedians'].set_label('median')
# Set Title
if set_title is None:
set_title = ' '.join(
[
dsname,
'on',
dt_utils.numpy_to_arm_date(self._ds[dsname].time.values[0]),
]
)
# Define the axes title, x-axis label, y-axis label
self.axes[subplot_index].set_title(set_title)
if vert is True:
self.axes[subplot_index].set_ylabel(axtitle)
if positions is None:
self.axes[subplot_index].set_xticks([])
else:
self.axes[subplot_index].set_xlabel(axtitle)
if positions is None:
self.axes[subplot_index].set_yticks([])
return self.axes[subplot_index]
<|code_end|>
examples/plotting/plot_pie_chart.py
<|code_start|><|code_end|>
|
act/plotting/distributiondisplay.py
<|code_start|>""" Module for Distribution Plotting. """
import matplotlib.pyplot as plt
import numpy as np
import xarray as xr
import pandas as pd
from ..utils import datetime_utils as dt_utils, calculate_percentages
from .plot import Display
class DistributionDisplay(Display):
"""
This class is used to make distribution related plots. It is inherited from Display
and therefore contains all of Display's attributes and methods.
Examples
--------
To create a DistributionDisplay with 3 rows, simply do:
.. code-block:: python
ds = act.io.read_arm_netcdf(the_file)
disp = act.plotting.DistributionDisplay(ds, subplot_shape=(3,), figsize=(15, 5))
The DistributionDisplay constructor takes in the same keyword arguments as
plt.subplots. For more information on the plt.subplots keyword arguments,
see the `matplotlib documentation
<https://matplotlib.org/api/_as_gen/matplotlib.pyplot.subplots.html>`_.
If no subplot_shape is provided, then no figure or axis will be created
until add_subplots or plots is called.
"""
def __init__(self, ds, subplot_shape=(1,), ds_name=None, **kwargs):
super().__init__(ds, subplot_shape, ds_name, **kwargs)
def set_xrng(self, xrng, subplot_index=(0,)):
"""
Sets the x range of the plot.
Parameters
----------
xrng : 2 number array
The x limits of the plot.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
"""
if self.axes is None:
raise RuntimeError('set_xrng requires the plot to be displayed.')
if not hasattr(self, 'xrng') and len(self.axes.shape) == 2:
self.xrng = np.zeros((self.axes.shape[0], self.axes.shape[1], 2), dtype='datetime64[D]')
elif not hasattr(self, 'xrng') and len(self.axes.shape) == 1:
self.xrng = np.zeros((self.axes.shape[0], 2), dtype='datetime64[D]')
self.axes[subplot_index].set_xlim(xrng)
self.xrng[subplot_index, :] = np.array(xrng)
def set_yrng(self, yrng, subplot_index=(0,)):
"""
Sets the y range of the plot.
Parameters
----------
yrng : 2 number array
The y limits of the plot.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
"""
if self.axes is None:
raise RuntimeError('set_yrng requires the plot to be displayed.')
if not hasattr(self, 'yrng') and len(self.axes.shape) == 2:
self.yrng = np.zeros((self.axes.shape[0], self.axes.shape[1], 2))
elif not hasattr(self, 'yrng') and len(self.axes.shape) == 1:
self.yrng = np.zeros((self.axes.shape[0], 2))
if yrng[0] == yrng[1]:
yrng[1] = yrng[1] + 1
self.axes[subplot_index].set_ylim(yrng)
self.yrng[subplot_index, :] = yrng
def _get_data(self, dsname, fields):
if isinstance(fields, str):
fields = [fields]
return self._ds[dsname][fields].dropna('time')
def plot_stacked_bar(
self,
field,
dsname=None,
bins=10,
sortby_field=None,
sortby_bins=None,
subplot_index=(0,),
set_title=None,
density=False,
hist_kwargs=dict(),
**kwargs,
):
"""
This procedure will plot a stacked bar graph of a histogram.
Parameters
----------
field : str
The name of the field to take the histogram of.
dsname : str or None
The name of the datastream the field is contained in. Set
to None to let ACT automatically determine this.
bins : array-like or int
The histogram bin boundaries to use. If not specified, numpy's
default 10 is used.
sortby_field : str or None
Set this option to a field name in order to sort the histograms
by a given field parameter. For example, one can sort histograms of CO2
concentration by temperature.
sortby_bins : array-like or None
The bins to sort the histograms by.
subplot_index : tuple
The subplot index to place the plot in
set_title : str
The title of the plot.
density : bool
Set to True to plot a p.d.f. instead of a frequency histogram.
hist_kwargs : dict
Additional keyword arguments to pass to numpy histogram.
Other keyword arguments will be passed into :func:`matplotlib.pyplot.bar`.
Returns
-------
return_dict : dict
A dictionary containing the plot axis handle, bin boundaries, and
generated histogram.
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
+ 'or more datasets in the TimeSeriesDisplay '
+ 'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
if sortby_field is not None:
ds = self._get_data(dsname, [field, sortby_field])
xdata, ydata = ds[field], ds[sortby_field]
else:
xdata = self._get_data(dsname, field)[field]
if 'units' in xdata.attrs:
xtitle = ''.join(['(', xdata.attrs['units'], ')'])
else:
xtitle = field
if sortby_bins is None and sortby_field is not None:
# We will default the y direction to have the same # of bins as x
if isinstance(bins, int):
n_bins = bins
else:
n_bins = len(bins)
sortby_bins = np.linspace(ydata.values.min(), ydata.values.max(), n_bins)
# Get the current plotting axis
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
if sortby_field is not None:
if 'units' in ydata.attrs:
ytitle = ''.join(['(', ydata.attrs['units'], ')'])
else:
ytitle = field
my_hist, x_bins, y_bins = np.histogram2d(
xdata.values.flatten(),
ydata.values.flatten(),
density=density,
bins=[bins, sortby_bins],
**hist_kwargs,
)
x_inds = (x_bins[:-1] + x_bins[1:]) / 2.0
self.axes[subplot_index].bar(
x_inds,
my_hist[:, 0].flatten(),
label=(str(y_bins[0]) + ' to ' + str(y_bins[1])),
**kwargs,
)
for i in range(1, len(y_bins) - 1):
self.axes[subplot_index].bar(
x_inds,
my_hist[:, i].flatten(),
bottom=my_hist[:, i - 1],
label=(str(y_bins[i]) + ' to ' + str(y_bins[i + 1])),
**kwargs,
)
self.axes[subplot_index].legend()
else:
my_hist, bins = np.histogram(
xdata.values.flatten(), bins=bins, density=density, **hist_kwargs
)
x_inds = (bins[:-1] + bins[1:]) / 2.0
self.axes[subplot_index].bar(x_inds, my_hist)
# Set Title
if set_title is None:
set_title = ' '.join(
[
dsname,
field,
'on',
dt_utils.numpy_to_arm_date(self._ds[dsname].time.values[0]),
]
)
self.axes[subplot_index].set_title(set_title)
self.axes[subplot_index].set_ylabel('count')
self.axes[subplot_index].set_xlabel(xtitle)
return_dict = {}
return_dict['plot_handle'] = self.axes[subplot_index]
if 'x_bins' in locals():
return_dict['x_bins'] = x_bins
return_dict['y_bins'] = y_bins
else:
return_dict['bins'] = bins
return_dict['histogram'] = my_hist
return return_dict
def plot_size_distribution(
self, field, bins, time=None, dsname=None, subplot_index=(0,), set_title=None, **kwargs
):
"""
This procedure plots a stairstep plot of a size distribution. This is
useful for plotting size distributions and waveforms.
Parameters
----------
field : str
The name of the field to plot the spectrum from.
bins : str or array-like
The name of the field that stores the bins for the spectra.
time : none or datetime
If None, spectra to plot will be automatically determined.
Otherwise, specify this field for the time period to plot.
dsname : str
The name of the Dataset to plot. Set to None to have
ACT automatically determine this.
subplot_index : tuple
The subplot index to place the plot in.
set_title : str or None
Use this to set the title.
Additional keyword arguments will be passed into :func:`matplotlib.pyplot.step`
Returns
-------
ax : matplotlib axis handle
The matplotlib axis handle referring to the plot.
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
+ 'or more datasets in the TimeSeriesDisplay '
+ 'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
xdata = self._get_data(dsname, field)[field]
if isinstance(bins, str):
bins = self._ds[dsname][bins]
else:
bins = xr.DataArray(bins)
if 'units' in bins.attrs:
xtitle = ''.join(['(', bins.attrs['units'], ')'])
else:
xtitle = 'Bin #'
if 'units' in xdata.attrs:
ytitle = ''.join(['(', xdata.attrs['units'], ')'])
else:
ytitle = field
if len(xdata.dims) > 1 and time is None:
raise ValueError(
'Input data has more than one dimension, ' + 'you must specify a time to plot!'
)
elif len(xdata.dims) > 1:
xdata = xdata.sel(time=time, method='nearest')
if len(bins.dims) > 1 or len(bins.values) != len(xdata.values):
raise ValueError(
'Bins must be a one dimensional field whose '
+ 'length is equal to the field length!'
)
# Get the current plotting axis
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
# Set Title
if set_title is None:
set_title = ' '.join(
[
dsname,
field,
'on',
dt_utils.numpy_to_arm_date(self._ds[dsname].time.values[0]),
]
)
if time is not None:
t = pd.Timestamp(time)
set_title += ''.join(
[' at ', ':'.join([str(t.hour), str(t.minute), str(t.second)])]
)
self.axes[subplot_index].set_title(set_title)
self.axes[subplot_index].step(bins.values, xdata.values, **kwargs)
self.axes[subplot_index].set_xlabel(xtitle)
self.axes[subplot_index].set_ylabel(ytitle)
return self.axes[subplot_index]
def plot_stairstep(
self,
field,
dsname=None,
bins=10,
sortby_field=None,
sortby_bins=None,
subplot_index=(0,),
set_title=None,
density=False,
hist_kwargs=dict(),
**kwargs,
):
"""
This procedure will plot a stairstep plot of a histogram.
Parameters
----------
field : str
The name of the field to take the histogram of.
dsname : str or None
The name of the datastream the field is contained in. Set
to None to let ACT automatically determine this.
bins : array-like or int
The histogram bin boundaries to use. If not specified, numpy's
default 10 is used.
sortby_field : str or None
Set this option to a field name in order to sort the histograms
by a given field parameter. For example, one can sort histograms of CO2
concentration by temperature.
sortby_bins : array-like or None
The bins to sort the histograms by.
subplot_index : tuple
The subplot index to place the plot in.
set_title : str
The title of the plot.
density : bool
Set to True to plot a p.d.f. instead of a frequency histogram.
hist_kwargs : dict
Additional keyword arguments to pass to numpy histogram.
Other keyword arguments will be passed into :func:`matplotlib.pyplot.step`.
Returns
-------
return_dict : dict
A dictionary containing the plot axis handle, bin boundaries, and
generated histogram.
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
+ 'or more datasets in the TimeSeriesDisplay '
+ 'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
xdata = self._get_data(dsname, field)[field]
if 'units' in xdata.attrs:
xtitle = ''.join(['(', xdata.attrs['units'], ')'])
else:
xtitle = field
if sortby_field is not None:
ydata = self._ds[dsname][sortby_field]
if sortby_bins is None and sortby_field is not None:
if isinstance(bins, int):
n_bins = bins
else:
n_bins = len(bins)
# We will default the y direction to have the same # of bins as x
sortby_bins = np.linspace(ydata.values.min(), ydata.values.max(), n_bins)
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
if sortby_field is not None:
if 'units' in ydata.attrs:
ytitle = ''.join(['(', ydata.attrs['units'], ')'])
else:
ytitle = field
my_hist, x_bins, y_bins = np.histogram2d(
xdata.values.flatten(),
ydata.values.flatten(),
density=density,
bins=[bins, sortby_bins],
**hist_kwargs,
)
x_inds = (x_bins[:-1] + x_bins[1:]) / 2.0
self.axes[subplot_index].step(
x_inds,
my_hist[:, 0].flatten(),
label=(str(y_bins[0]) + ' to ' + str(y_bins[1])),
**kwargs,
)
for i in range(1, len(y_bins) - 1):
self.axes[subplot_index].step(
x_inds,
my_hist[:, i].flatten(),
label=(str(y_bins[i]) + ' to ' + str(y_bins[i + 1])),
**kwargs,
)
self.axes[subplot_index].legend()
else:
my_hist, bins = np.histogram(
xdata.values.flatten(), bins=bins, density=density, **hist_kwargs
)
x_inds = (bins[:-1] + bins[1:]) / 2.0
self.axes[subplot_index].step(x_inds, my_hist, **kwargs)
# Set Title
if set_title is None:
set_title = ' '.join(
[
dsname,
field,
'on',
dt_utils.numpy_to_arm_date(self._ds[dsname].time.values[0]),
]
)
self.axes[subplot_index].set_title(set_title)
self.axes[subplot_index].set_ylabel('count')
self.axes[subplot_index].set_xlabel(xtitle)
return_dict = {}
return_dict['plot_handle'] = self.axes[subplot_index]
if 'x_bins' in locals():
return_dict['x_bins'] = x_bins
return_dict['y_bins'] = y_bins
else:
return_dict['bins'] = bins
return_dict['histogram'] = my_hist
return return_dict
def plot_heatmap(
self,
x_field,
y_field,
dsname=None,
x_bins=None,
y_bins=None,
subplot_index=(0,),
set_title=None,
density=False,
set_shading='auto',
hist_kwargs=dict(),
threshold=None,
**kwargs,
):
"""
This procedure will plot a heatmap of a histogram from 2 variables.
Parameters
----------
x_field : str
The name of the field to take the histogram of on the X axis.
y_field : str
The name of the field to take the histogram of on the Y axis.
dsname : str or None
The name of the datastream the field is contained in. Set
to None to let ACT automatically determine this.
x_bins : array-like, int, or None
The histogram bin boundaries to use for the variable on the X axis.
Set to None to use numpy's default boundaries.
If an int, will indicate the number of bins to use
y_bins : array-like, int, or None
The histogram bin boundaries to use for the variable on the Y axis.
Set to None to use numpy's default boundaries.
If an int, will indicate the number of bins to use
subplot_index : tuple
The subplot index to place the plot in
set_title : str
The title of the plot.
density : bool
Set to True to plot a p.d.f. instead of a frequency histogram.
set_shading : string
Option to set the matplotlib.pcolormesh shading parameter.
Defaults to 'auto'.
threshold : float
Value on which to threshold the histogram results for plotting.
Setting to 0 will ensure that all 0 values are removed from the plot
making it easier to distinguish between 0 and low values
hist_kwargs : dict
Additional keyword arguments to pass to numpy histogram.
Other keyword arguments will be passed into :func:`matplotlib.pyplot.pcolormesh`.
Returns
-------
return_dict : dict
A dictionary containing the plot axis handle, bin boundaries, and
generated histogram.
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
ds = self._get_data(dsname, [x_field, y_field])
xdata, ydata = ds[x_field], ds[y_field]
if 'units' in xdata.attrs:
xtitle = ''.join(['(', xdata.attrs['units'], ')'])
else:
xtitle = x_field
if x_bins is not None and isinstance(x_bins, int):
x_bins = np.linspace(xdata.values.min(), xdata.values.max(), x_bins)
if y_bins is not None and isinstance(y_bins, int):
y_bins = np.linspace(ydata.values.min(), ydata.values.max(), y_bins)
if x_bins is not None and y_bins is None:
# We will default the y direction to have the same # of bins as x
y_bins = np.linspace(ydata.values.min(), ydata.values.max(), len(x_bins))
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
if 'units' in ydata.attrs:
ytitle = ''.join(['(', ydata.attrs['units'], ')'])
else:
ytitle = y_field
if x_bins is None:
my_hist, x_bins, y_bins = np.histogram2d(
xdata.values.flatten(), ydata.values.flatten(), density=density, **hist_kwargs
)
else:
my_hist, x_bins, y_bins = np.histogram2d(
xdata.values.flatten(),
ydata.values.flatten(),
density=density,
bins=[x_bins, y_bins],
**hist_kwargs,
)
# Adding in the ability to threshold the heatmaps
if threshold is not None:
my_hist[my_hist <= threshold] = np.nan
x_inds = (x_bins[:-1] + x_bins[1:]) / 2.0
y_inds = (y_bins[:-1] + y_bins[1:]) / 2.0
xi, yi = np.meshgrid(x_inds, y_inds, indexing='ij')
mesh = self.axes[subplot_index].pcolormesh(xi, yi, my_hist, shading=set_shading, **kwargs)
# Set Title
if set_title is None:
set_title = ' '.join(
[
dsname,
'on',
dt_utils.numpy_to_arm_date(self._ds[dsname].time.values[0]),
]
)
self.axes[subplot_index].set_title(set_title)
self.axes[subplot_index].set_ylabel(ytitle)
self.axes[subplot_index].set_xlabel(xtitle)
self.add_colorbar(mesh, title='count', subplot_index=subplot_index)
return_dict = {}
return_dict['plot_handle'] = self.axes[subplot_index]
return_dict['x_bins'] = x_bins
return_dict['y_bins'] = y_bins
return_dict['histogram'] = my_hist
return return_dict
def set_ratio_line(self, subplot_index=(0,)):
"""
Sets the 1:1 ratio line.
Parameters
----------
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
"""
if self.axes is None:
raise RuntimeError('set_ratio_line requires the plot to be displayed.')
# Define the xticks of the figure
xlims = self.axes[subplot_index].get_xticks()
ratio = np.linspace(xlims, xlims[-1])
self.axes[subplot_index].plot(ratio, ratio, 'k--')
def plot_scatter(
self,
x_field,
y_field,
m_field=None,
dsname=None,
cbar_label=None,
set_title=None,
subplot_index=(0,),
**kwargs,
):
"""
This procedure will produce a scatter plot from 2 variables.
Parameters
----------
x_field : str
The name of the field to display on the X axis.
y_field : str
The name of the field to display on the Y axis.
m_field : str
The name of the field to display on the markers.
cbar_label : str
The desired name to plot for the colorbar
set_title : str
The desired title for the plot.
Default title is created from the datastream.
dsname : str or None
The name of the datastream the field is contained in. Set
to None to let ACT automatically determine this.
subplot_index : tuple
The subplot index to place the plot in
Other keyword arguments will be passed into :func:`matplotlib.pyplot.scatter`.
Returns
-------
ax : matplotlib axis handle
The matplotlib axis handle of the plot
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
if m_field is None:
mdata = None
ds = self._get_data(dsname, [x_field, y_field])
xdata, ydata = ds[x_field], ds[y_field]
else:
ds = self._get_data(dsname, [x_field, y_field, m_field])
xdata, ydata, mdata = ds[x_field], ds[y_field], ds[m_field]
# Define the x-axis label. If units are available, plot.
if 'units' in xdata.attrs:
xtitle = x_field + ''.join([' (', xdata.attrs['units'], ')'])
else:
xtitle = x_field
# Define the y-axis label. If units are available, plot
if 'units' in ydata.attrs:
ytitle = y_field + ''.join([' (', ydata.attrs['units'], ')'])
else:
ytitle = y_field
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
# Define the axes for the figure
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
# Display the scatter plot, pass keyword args for unspecified attributes
scc = self.axes[subplot_index].scatter(xdata, ydata, c=mdata, **kwargs)
# Set Title
if set_title is None:
set_title = ' '.join(
[
dsname,
'on',
dt_utils.numpy_to_arm_date(self._ds[dsname].time.values[0]),
]
)
# Check to see if a colorbar label was set
if mdata is not None:
if cbar_label is None:
# Define the y-axis label. If units are available, plot
if 'units' in mdata.attrs:
ztitle = m_field + ''.join([' (', mdata.attrs['units'], ')'])
else:
ztitle = m_field
else:
ztitle = cbar_label
# Plot the colorbar
cbar = plt.colorbar(scc)
cbar.ax.set_ylabel(ztitle)
# Define the axes title, x-axis label, y-axis label
self.axes[subplot_index].set_title(set_title)
self.axes[subplot_index].set_ylabel(ytitle)
self.axes[subplot_index].set_xlabel(xtitle)
return self.axes[subplot_index]
def plot_violin(
self,
field,
positions=None,
dsname=None,
vert=True,
showmeans=True,
showmedians=True,
showextrema=True,
subplot_index=(0,),
set_title=None,
**kwargs,
):
"""
This procedure will produce a violin plot for the selected
field (or fields).
Parameters
----------
field : str or list
The name of the field (or fields) to display on the X axis.
positions : array-like, Default: None
The positions of the ticks along dependent axis.
dsname : str or None
The name of the datastream the field is contained in. Set
to None to let ACT automatically determine this.
vert : Boolean, Default: True
Display violin plot vertically. False will display it horizontally.
showmeans : Boolean; Default: True
If True, will display the mean of the datastream.
showmedians : Boolean; Default: True
If True, will display the median of the datastream.
showextrema : Boolean; Default: True
If True, will display the extremes of the datastream.
subplot_index : tuple
The subplot index to place the plot in
set_title : str
The title of the plot.
Other keyword arguments will be passed into :func:`matplotlib.pyplot.violinplot`.
Returns
-------
ax : matplotlib axis handle
The matplotlib axis handle of the plot
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the TimeSeriesDisplay '
'object.'
)
if dsname is None:
dsname = list(self._ds.keys())[0]
ds = self._get_data(dsname, field)
ndata = ds[field]
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
# Define the axes for the figure
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
# Define the axis label. If units are available, plot.
if 'units' in ndata.attrs:
axtitle = field + ''.join([' (', ndata.attrs['units'], ')'])
else:
axtitle = field
# Display the violin plot, pass keyword args for unspecified attributes
scc = self.axes[subplot_index].violinplot(
ndata,
positions=positions,
vert=vert,
showmeans=showmeans,
showmedians=showmedians,
showextrema=showextrema,
**kwargs,
)
if showmeans is True:
scc['cmeans'].set_edgecolor('red')
scc['cmeans'].set_label('mean')
if showmedians is True:
scc['cmedians'].set_edgecolor('black')
scc['cmedians'].set_label('median')
# Set Title
if set_title is None:
set_title = ' '.join(
[
dsname,
'on',
dt_utils.numpy_to_arm_date(self._ds[dsname].time.values[0]),
]
)
# Define the axes title, x-axis label, y-axis label
self.axes[subplot_index].set_title(set_title)
if vert is True:
self.axes[subplot_index].set_ylabel(axtitle)
if positions is None:
self.axes[subplot_index].set_xticks([])
else:
self.axes[subplot_index].set_xlabel(axtitle)
if positions is None:
self.axes[subplot_index].set_yticks([])
return self.axes[subplot_index]
def plot_pie_chart(
self,
fields,
time=None,
time_slice=None,
threshold=None,
fill_value=0.0,
dsname=None,
subplot_index=(0,),
set_title=None,
autopct='%1.1f%%',
**kwargs,
):
"""
This procedure will produce a pie chart for the selected fields.
Parameters
----------
fields : list
The list of fields to calculate percentages on for the pie chart.
time : datetime
A single datetime to be passed into the act.utils.calculate_percentages function
if desired. Default is None and all data will be included.
time_slice : tuple
A tuple of two datetimes to grab all data between those two datetimes for
act.utils.calculate_percentages. Default is None and all data will be included.
threshold : float
Threshold in which anything below will be considered invalid.
Default is None.
fill_value : float
Fill value for invalid data. Only used if a threshold is provided.
dsname : str or None
The name of the datastream the field is contained in. Set
to None to let ACT automatically determine this.
subplot_index : tuple
The subplot index to place the plot in
set_title : str
The title of the plot.
autopct : str
Format string for the percentages. Default is float with one
decimal place. If this parameter is set to None, no percentage
string values are displayed.
**kwargs : keywords
Keywords to pass through to :func:`matplotlib.pyplot.pie`.
Returns
-------
ax : matplotlib axis handle
The matplotlib axis handle of the plot
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
+ 'or more datasets in the DistributionDisplay '
+ 'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
# Get the current plotting axis
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
# Set Title
if set_title is None:
set_title = ' '.join(
[
dsname,
'on',
dt_utils.numpy_to_arm_date(self._ds[dsname].time.values[0]),
]
)
self.axes[subplot_index].set_title(set_title)
percentages = calculate_percentages(
self._ds[dsname],
fields,
time=time,
time_slice=time_slice,
threshold=threshold,
fill_value=fill_value,
)
self.axes[subplot_index].pie(
[percentages[field] for field in percentages.keys()],
labels=percentages.keys(),
autopct=autopct,
**kwargs,
)
return self.axes[subplot_index]
<|code_end|>
examples/plotting/plot_pie_chart.py
<|code_start|>"""
Calculate and View Aerosol Percentages
--------------------------------------
Calculate the percentages of different aerosols in an Aerosol
Chemical Speciation (AOS) monitor dataset and view the percentages
in a pie chart.
Written: Zach Sherman
"""
from arm_test_data import DATASETS
import matplotlib.pyplot as plt
import act
from act.io.arm import read_arm_netcdf
# Read an ARM AOS dataset
filename = DATASETS.fetch('sgpaosacsmE13.b2.20230420.000109.nc')
ds = read_arm_netcdf(filename)
# Let us print out the fields in the dataset and see what it contains.
print(ds.data_vars.keys())
# Knowing what fields the dataset contains, let's create a list of fields
# to use in the plot.
fields = ['sulfate', 'ammonium', 'nitrate', 'chloride']
# We also want to provide some keyword arguments to avoid invalid data such
# as negative values.
threshold = 0.0
fill_value = 0.0
# Create a DistributionDisplay object to compare fields
display = act.plotting.DistributionDisplay(ds)
# We can set one of the slices to explode and give it a nice shadow.
explode = (0, 0.1, 0, 0)
shadow = True
# Create a pie chart using the fields list. The percentages of the
# fields will be calculated using act.utils.calculate_percentages.
display.plot_pie_chart(
fields,
threshold=threshold,
fill_value=fill_value,
explode=explode,
shadow=True,
)
plt.show()
<|code_end|>
|
GeographicPlotDisplay documentation missing description of return value
* ACT version: 2.1.1
* Python version: all
* Operating System: all
### Description
The GeographicPlotDisplay docstring is missing a description of the returned matplotlib axes object. This made it confusing to explain to a student how to adjust the axes limits on a GeographicPlotDisplay, since doing so requires calling methods on the returned matplotlib axes object.
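For context, the adjustment has to go through the returned axes handle rather than through the display object itself. A minimal sketch of what that usage looks like once the return value is documented (the filename and plotted field below are hypothetical):

import matplotlib.pyplot as plt
import act

ds = act.io.arm.read_arm_netcdf('sonde_file.nc')  # hypothetical file with lat/lon fields
display = act.plotting.GeographicPlotDisplay(ds)
ax = display.geoplot('alt')  # geoplot returns the cartopy GeoAxes it drew on
ax.set_extent([-100, -95, 35, 38])  # axes limits are adjusted via the returned handle
plt.show()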
|
act/plotting/geodisplay.py
<|code_start|>"""
Stores the class for GeographicPlotDisplay.
"""
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from .plot import Display
try:
import cartopy.crs as ccrs
import cartopy.feature as cfeature
from cartopy.io import img_tiles
CARTOPY_AVAILABLE = True
except ImportError:
CARTOPY_AVAILABLE = False
class GeographicPlotDisplay(Display):
"""
A class for making a geographic tracer plot of aircraft, ship, or other
moving platform data.
This is inherited from the :func:`act.plotting.Display`
class and therefore has the same attributes as that class.
See :func:`act.plotting.Display`
for more information. There are no additional attributes or parameters
to this class.
In order to create geographic plots, ACT needs the Cartopy package to be
installed on your system. More information about Cartopy can be found
here: https://scitools.org.uk/cartopy/docs/latest/.
"""
def __init__(self, ds, ds_name=None, **kwargs):
if not CARTOPY_AVAILABLE:
raise ImportError(
'Cartopy needs to be installed on your ' 'system to make geographic display plots.'
)
super().__init__(ds, ds_name, **kwargs)
if self.fig is None:
self.fig = plt.figure(**kwargs)
def geoplot(
self,
data_field=None,
lat_field='lat',
lon_field='lon',
dsname=None,
cbar_label=None,
title=None,
projection=None,
plot_buffer=0.08,
img_tile=None,
img_tile_args={},
tile=8,
cartopy_feature=None,
cmap='rainbow',
text=None,
gridlines=True,
**kwargs,
):
"""
Creates a latitude and longitude plot of a time series data set with
data values indicated by color and described with a colorbar.
Latitude values must be in degree north (-90 to 90) and
longitude must be in degree east (-180 to 180).
Parameters
----------
data_field : str
Name of data field in the dataset to plot.
lat_field : str
Name of latitude field in the dataset to use.
lon_field : str
Name of longitude field in the dataset to use.
dsname : str or None
The name of the datastream to plot. Set to None to make ACT
attempt to automatically determine this.
cbar_label : str
Label to use with colorbar. If set to None will attempt
to create label from long_name and units.
title : str
Plot title.
projection : cartopy.crs object
Projection to use on the plot. See
https://scitools.org.uk/cartopy/docs/latest/reference/projections.html?highlight=projections
plot_buffer : float
Buffer to add around data on plot in lat and lon dimension.
img_tile : str
Image to use for the plot background. Set to None to not use
background image. For all image background types, see:
https://scitools.org.uk/cartopy/docs/v0.16/cartopy/io/img_tiles.html
Default is None.
img_tile_args : dict
Keyword arguments for the chosen img_tile. These arguments can be
found for the corresponding img_tile here:
https://scitools.org.uk/cartopy/docs/v0.16/cartopy/io/img_tiles.html
Default is an empty dictionary.
tile : int
Tile zoom to use with background image. Higher number indicates
more resolution. A value of 8 is typical for a normal sonde plot.
cartopy_feature : list of str or str
Cartopy feature to add to plot.
cmap : str
Color map to use for colorbar.
text : dictionary
Dictionary of {text:[lon,lat]} to add to plot. Can have more
than one set of text to add.
gridlines : boolean
Use latitude and longitude gridlines.
**kwargs : keyword arguments
Any other keyword arguments that will be passed
into :func:`matplotlib.pyplot.scatter` when the figure
is made. See the matplotlib documentation for further details
on what keyword arguments are available.
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the GeographicPlotDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
if data_field is None:
raise ValueError('You must enter the name of the data ' 'to be plotted.')
if projection is None:
if CARTOPY_AVAILABLE:
projection = ccrs.PlateCarree()
# Extract data from the dataset
try:
lat = self._ds[dsname][lat_field].values
except KeyError:
raise ValueError(
(
'You will need to provide the name of the '
"field if not '{}' to use for latitude "
'data.'
).format(lat_field)
)
try:
lon = self._ds[dsname][lon_field].values
except KeyError:
raise ValueError(
(
'You will need to provide the name of the '
"field if not '{}' to use for longitude "
'data.'
).format(lon_field)
)
# Set up metadata information for display on plot
if cbar_label is None:
try:
cbar_label = (
self._ds[dsname][data_field].attrs['long_name']
+ ' ('
+ self._ds[dsname][data_field].attrs['units']
+ ')'
)
except KeyError:
cbar_label = data_field
lat_limits = [np.nanmin(lat), np.nanmax(lat)]
lon_limits = [np.nanmin(lon), np.nanmax(lon)]
box_size = np.max([np.abs(np.diff(lat_limits)), np.abs(np.diff(lon_limits))])
bx_buf = box_size * plot_buffer
lat_center = np.sum(lat_limits) / 2.0
lon_center = np.sum(lon_limits) / 2.0
lat_limits = [
lat_center - box_size / 2.0 - bx_buf,
lat_center + box_size / 2.0 + bx_buf,
]
lon_limits = [
lon_center - box_size / 2.0 - bx_buf,
lon_center + box_size / 2.0 + bx_buf,
]
data = self._ds[dsname][data_field].values
# Create base plot projection
ax = plt.axes(projection=projection)
plt.subplots_adjust(left=0.01, right=0.99, bottom=0.05, top=0.93)
ax.set_extent([lon_limits[0], lon_limits[1], lat_limits[0], lat_limits[1]], crs=projection)
if title is None:
try:
dim = list(self._ds[dsname][data_field].dims)
ts = pd.to_datetime(str(self._ds[dsname][dim[0]].values[0]))
date = ts.strftime('%Y-%m-%d')
time_str = ts.strftime('%H:%M:%S')
plt.title(' '.join([dsname, 'at', date, time_str]))
except NameError:
plt.title(dsname)
else:
plt.title(title)
if img_tile is not None:
tiler = getattr(img_tiles, img_tile)(**img_tile_args)
ax.add_image(tiler, tile)
colorbar_map = None
if cmap is not None:
colorbar_map = matplotlib.colormaps.get_cmap(cmap)
sc = ax.scatter(lon, lat, c=data, cmap=colorbar_map, **kwargs)
cbar = plt.colorbar(sc)
cbar.ax.set_ylabel(cbar_label)
if cartopy_feature is not None:
if isinstance(cartopy_feature, str):
cartopy_feature = [cartopy_feature]
cartopy_feature = [ii.upper() for ii in cartopy_feature]
if 'STATES' in cartopy_feature:
ax.add_feature(cfeature.STATES.with_scale('10m'))
if 'LAND' in cartopy_feature:
ax.add_feature(cfeature.LAND)
if 'OCEAN' in cartopy_feature:
ax.add_feature(cfeature.OCEAN)
if 'COASTLINE' in cartopy_feature:
ax.add_feature(cfeature.COASTLINE)
if 'BORDERS' in cartopy_feature:
ax.add_feature(cfeature.BORDERS, linestyle=':')
if 'LAKES' in cartopy_feature:
ax.add_feature(cfeature.LAKES, alpha=0.5)
if 'RIVERS' in cartopy_feature:
ax.add_feature(cfeature.RIVERS)
if text is not None:
for label, location in text.items():
ax.plot(location[0], location[1], marker='*', color='black')
ax.text(location[0], location[1], label, color='black')
if gridlines:
if projection == ccrs.PlateCarree() or projection == ccrs.Mercator:
gl = ax.gridlines(
crs=projection,
draw_labels=True,
linewidth=1,
color='gray',
alpha=0.5,
linestyle='--',
)
gl.top_labels = False
gl.left_labels = True
gl.bottom_labels = True
gl.right_labels = False
gl.xlabel_style = {'size': 6, 'color': 'gray'}
gl.ylabel_style = {'size': 6, 'color': 'gray'}
else:
# Labels are only currently supported for PlateCarree and Mercator
gl = ax.gridlines(
draw_labels=False,
linewidth=1,
color='gray',
alpha=0.5,
linestyle='--',
)
return ax
<|code_end|>
|
act/plotting/geodisplay.py
<|code_start|>"""
Stores the class for GeographicPlotDisplay.
"""
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from .plot import Display
try:
import cartopy.crs as ccrs
import cartopy.feature as cfeature
from cartopy.io import img_tiles
CARTOPY_AVAILABLE = True
except ImportError:
CARTOPY_AVAILABLE = False
class GeographicPlotDisplay(Display):
"""
A class for making a geographic tracer plot of aircraft, ship, or other
moving platform data.
This is inherited from the :func:`act.plotting.Display`
class and therefore has the same attributes as that class.
See :func:`act.plotting.Display`
for more information. There are no additional attributes or parameters
to this class.
In order to create geographic plots, ACT needs the Cartopy package to be
installed on your system. More information about Cartopy can be found
here: https://scitools.org.uk/cartopy/docs/latest/.
"""
def __init__(self, ds, ds_name=None, **kwargs):
if not CARTOPY_AVAILABLE:
raise ImportError(
'Cartopy needs to be installed on your ' 'system to make geographic display plots.'
)
super().__init__(ds, ds_name, **kwargs)
if self.fig is None:
self.fig = plt.figure(**kwargs)
def geoplot(
self,
data_field=None,
lat_field='lat',
lon_field='lon',
dsname=None,
cbar_label=None,
title=None,
projection=None,
plot_buffer=0.08,
img_tile=None,
img_tile_args={},
tile=8,
cartopy_feature=None,
cmap='rainbow',
text=None,
gridlines=True,
**kwargs,
):
"""
Creates a latitude and longitude plot of a time series data set with
data values indicated by color and described with a colorbar.
Latitude values must be in degree north (-90 to 90) and
longitude must be in degree east (-180 to 180).
Parameters
----------
data_field : str
Name of data field in the dataset to plot.
lat_field : str
Name of latitude field in the dataset to use.
lon_field : str
Name of longitude field in the dataset to use.
dsname : str or None
The name of the datastream to plot. Set to None to make ACT
attempt to automatically determine this.
cbar_label : str
Label to use with colorbar. If set to None will attempt
to create label from long_name and units.
title : str
Plot title.
projection : cartopy.crs object
Projection to use on the plot. See
https://scitools.org.uk/cartopy/docs/latest/reference/projections.html?highlight=projections
plot_buffer : float
Buffer to add around data on plot in lat and lon dimension.
img_tile : str
Image to use for the plot background. Set to None to not use
background image. For all image background types, see:
https://scitools.org.uk/cartopy/docs/v0.16/cartopy/io/img_tiles.html
Default is None.
img_tile_args : dict
Keyword arguments for the chosen img_tile. These arguments can be
found for the corresponding img_tile here:
https://scitools.org.uk/cartopy/docs/v0.16/cartopy/io/img_tiles.html
Default is an empty dictionary.
tile : int
Tile zoom to use with background image. Higher number indicates
more resolution. A value of 8 is typical for a normal sonde plot.
cartopy_feature : list of str or str
Cartopy feature to add to plot.
cmap : str
Color map to use for colorbar.
text : dictionary
Dictionary of {text:[lon,lat]} to add to plot. Can have more
than one set of text to add.
gridlines : boolean
Use latitude and longitude gridlines.
**kwargs : keyword arguments
Any other keyword arguments that will be passed
into :func:`matplotlib.pyplot.scatter` when the figure
is made. See the matplotlib documentation for further details
on what keyword arguments are available.
Returns
-------
ax : matplotlib axis handle
The matplotlib axis handle of the plot.
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
'You must choose a datastream when there are 2 '
'or more datasets in the GeographicPlotDisplay '
'object.'
)
elif dsname is None:
dsname = list(self._ds.keys())[0]
if data_field is None:
raise ValueError('You must enter the name of the data ' 'to be plotted.')
if projection is None:
if CARTOPY_AVAILABLE:
projection = ccrs.PlateCarree()
# Extract data from the dataset
try:
lat = self._ds[dsname][lat_field].values
except KeyError:
raise ValueError(
(
'You will need to provide the name of the '
"field if not '{}' to use for latitude "
'data.'
).format(lat_field)
)
try:
lon = self._ds[dsname][lon_field].values
except KeyError:
raise ValueError(
(
'You will need to provide the name of the '
"field if not '{}' to use for longitude "
'data.'
).format(lon_field)
)
# Set up metadata information for display on plot
if cbar_label is None:
try:
cbar_label = (
self._ds[dsname][data_field].attrs['long_name']
+ ' ('
+ self._ds[dsname][data_field].attrs['units']
+ ')'
)
except KeyError:
cbar_label = data_field
lat_limits = [np.nanmin(lat), np.nanmax(lat)]
lon_limits = [np.nanmin(lon), np.nanmax(lon)]
box_size = np.max([np.abs(np.diff(lat_limits)), np.abs(np.diff(lon_limits))])
bx_buf = box_size * plot_buffer
lat_center = np.sum(lat_limits) / 2.0
lon_center = np.sum(lon_limits) / 2.0
lat_limits = [
lat_center - box_size / 2.0 - bx_buf,
lat_center + box_size / 2.0 + bx_buf,
]
lon_limits = [
lon_center - box_size / 2.0 - bx_buf,
lon_center + box_size / 2.0 + bx_buf,
]
data = self._ds[dsname][data_field].values
# Create base plot projection
ax = plt.axes(projection=projection)
plt.subplots_adjust(left=0.01, right=0.99, bottom=0.05, top=0.93)
ax.set_extent([lon_limits[0], lon_limits[1], lat_limits[0], lat_limits[1]], crs=projection)
if title is None:
try:
dim = list(self._ds[dsname][data_field].dims)
ts = pd.to_datetime(str(self._ds[dsname][dim[0]].values[0]))
date = ts.strftime('%Y-%m-%d')
time_str = ts.strftime('%H:%M:%S')
plt.title(' '.join([dsname, 'at', date, time_str]))
except NameError:
plt.title(dsname)
else:
plt.title(title)
if img_tile is not None:
tiler = getattr(img_tiles, img_tile)(**img_tile_args)
ax.add_image(tiler, tile)
colorbar_map = None
if cmap is not None:
colorbar_map = matplotlib.colormaps.get_cmap(cmap)
sc = ax.scatter(lon, lat, c=data, cmap=colorbar_map, **kwargs)
cbar = plt.colorbar(sc)
cbar.ax.set_ylabel(cbar_label)
if cartopy_feature is not None:
if isinstance(cartopy_feature, str):
cartopy_feature = [cartopy_feature]
cartopy_feature = [ii.upper() for ii in cartopy_feature]
if 'STATES' in cartopy_feature:
ax.add_feature(cfeature.STATES.with_scale('10m'))
if 'LAND' in cartopy_feature:
ax.add_feature(cfeature.LAND)
if 'OCEAN' in cartopy_feature:
ax.add_feature(cfeature.OCEAN)
if 'COASTLINE' in cartopy_feature:
ax.add_feature(cfeature.COASTLINE)
if 'BORDERS' in cartopy_feature:
ax.add_feature(cfeature.BORDERS, linestyle=':')
if 'LAKES' in cartopy_feature:
ax.add_feature(cfeature.LAKES, alpha=0.5)
if 'RIVERS' in cartopy_feature:
ax.add_feature(cfeature.RIVERS)
if text is not None:
for label, location in text.items():
ax.plot(location[0], location[1], marker='*', color='black')
ax.text(location[0], location[1], label, color='black')
if gridlines:
if projection == ccrs.PlateCarree() or projection == ccrs.Mercator:
gl = ax.gridlines(
crs=projection,
draw_labels=True,
linewidth=1,
color='gray',
alpha=0.5,
linestyle='--',
)
gl.top_labels = False
gl.left_labels = True
gl.bottom_labels = True
gl.right_labels = False
gl.xlabel_style = {'size': 6, 'color': 'gray'}
gl.ylabel_style = {'size': 6, 'color': 'gray'}
else:
# Labels are only currently supported for PlateCarree and Mercator
gl = ax.gridlines(
draw_labels=False,
linewidth=1,
color='gray',
alpha=0.5,
linestyle='--',
)
return ax
<|code_end|>
|
AmeriFlux Documentation is not showing up in the API
The new act.io.ameriflux code is not showing up in the documentation.
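A quick way to see the symptom from the user side (assuming the function actually defined in act/io/ameriflux.py is named convert_to_ameriflux): the attribute name registered with lazy_loader in act/io/__init__.py is what act.io advertises, so when it does not match the real function name the advertised name cannot be resolved and never shows up correctly in the rendered API documentation.

import act.io
# Names the lazy loader advertises for the ameriflux functionality:
print([name for name in dir(act.io) if 'ameriflux' in name])
# The module itself defines convert_to_ameriflux, not the currently registered name:
from act.io import ameriflux
print(hasattr(ameriflux, 'convert_to_ameriflux'))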
|
act/io/__init__.py
<|code_start|>"""
This module contains procedures for reading and writing various ARM datasets.
"""
import lazy_loader as lazy
__getattr__, __dir__, __all__ = lazy.attach(
__name__,
submodules=[
'arm',
'ameriflux',
'text',
'icartt',
'mpl',
'neon',
'noaagml',
'noaapsl',
'pysp2',
'hysplit',
],
submod_attrs={
'arm': [
'WriteDataset',
'check_arm_standards',
'create_ds_from_arm_dod',
'read_arm_netcdf',
'check_if_tar_gz_file',
'read_arm_mmcr',
],
'ameriflux': ['format_as_ameriflux'],
'text': ['read_csv'],
'icartt': ['read_icartt'],
'mpl': ['proc_sigma_mplv5_read', 'read_sigma_mplv5'],
'neon': ['read_neon_csv'],
'noaagml': [
'read_gml',
'read_gml_co2',
'read_gml_halo',
'read_gml_met',
'read_gml_ozone',
'read_gml_radiation',
'read_surfrad',
],
'noaapsl': [
'read_psl_wind_profiler',
'read_psl_wind_profiler_temperature',
'read_psl_parsivel',
'read_psl_radar_fmcw_moment',
'read_psl_surface_met',
],
'pysp2': ['read_hk_file', 'read_sp2', 'read_sp2_dat'],
'sodar': ['read_mfas_sodar'],
'hysplit': ['read_hysplit'],
},
)
<|code_end|>
|
act/io/__init__.py
<|code_start|>"""
This module contains procedures for reading and writing various ARM datasets.
"""
import lazy_loader as lazy
__getattr__, __dir__, __all__ = lazy.attach(
__name__,
submodules=[
'arm',
'ameriflux',
'text',
'icartt',
'mpl',
'neon',
'noaagml',
'noaapsl',
'pysp2',
'hysplit',
],
submod_attrs={
'arm': [
'WriteDataset',
'check_arm_standards',
'create_ds_from_arm_dod',
'read_arm_netcdf',
'check_if_tar_gz_file',
'read_arm_mmcr',
],
'ameriflux': ['convert_to_ameriflux'],
'text': ['read_csv'],
'icartt': ['read_icartt'],
'mpl': ['proc_sigma_mplv5_read', 'read_sigma_mplv5'],
'neon': ['read_neon_csv'],
'noaagml': [
'read_gml',
'read_gml_co2',
'read_gml_halo',
'read_gml_met',
'read_gml_ozone',
'read_gml_radiation',
'read_surfrad',
],
'noaapsl': [
'read_psl_wind_profiler',
'read_psl_wind_profiler_temperature',
'read_psl_parsivel',
'read_psl_radar_fmcw_moment',
'read_psl_surface_met',
],
'pysp2': ['read_hk_file', 'read_sp2', 'read_sp2_dat'],
'sodar': ['read_mfas_sodar'],
'hysplit': ['read_hysplit'],
},
)
<|code_end|>
|
Fix faces using GFPGAN + high batch count tends to fail on 6GB VRAM
6GB of VRAM is already on a knife's edge, and this combination seems to push it over. However, it doesn't fail every single time, and I have not seen it happen with a batch count of 1, even when hitting submit repeatedly.
RuntimeError: CUDA out of memory. Tried to allocate 1024.00 MiB (GPU 0; 6.00 GiB total capacity; 3.30 GiB already allocated; 0 bytes free; 4.60 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
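One mitigation on 6 GB cards, and the change applied in the updated webui.py below, is to release PyTorch's cached allocations immediately before the GFPGAN pass so the face-restoration allocation has a chance to fit. A minimal sketch, reusing the torch_gc() helper that webui.py already defines:

```python
# Sketch only: free the CUDA cache right before the memory-hungry GFPGAN step.
import torch

def torch_gc():
    # drop cached blocks and collect inter-process handles
    torch.cuda.empty_cache()
    torch.cuda.ipc_collect()

# inside the per-image loop, before face restoration:
#   torch_gc()
#   cropped_faces, restored_faces, restored_img = GFPGAN.enhance(
#       x_sample, has_aligned=False, only_center_face=False, paste_back=True)
```

Setting PYTORCH_CUDA_ALLOC_CONF (e.g. max_split_size_mb:128) before launch may also reduce fragmentation, as the error message suggests.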
|
webui.py
<|code_start|>import argparse, os, sys, glob
from collections import namedtuple
import torch
import torch.nn as nn
import numpy as np
import gradio as gr
from omegaconf import OmegaConf
from PIL import Image, ImageFont, ImageDraw, PngImagePlugin
from itertools import islice
from einops import rearrange, repeat
from torch import autocast
import mimetypes
import random
import math
import html
import time
import json
import traceback
import k_diffusion.sampling
from ldm.util import instantiate_from_config
from ldm.models.diffusion.ddim import DDIMSampler
from ldm.models.diffusion.plms import PLMSSampler
import ldm.modules.encoders.modules
try:
# this silences the annoying "Some weights of the model checkpoint were not used when initializing..." message at start.
from transformers import logging
logging.set_verbosity_error()
except:
pass
# this is a fix for Windows users. Without it, javascript files will be served with text/html content-type and the browser will not show any UI
mimetypes.init()
mimetypes.add_type('application/javascript', '.js')
# some of those options should not be changed at all because they would break the model, so I removed them from options.
opt_C = 4
opt_f = 8
LANCZOS = (Image.Resampling.LANCZOS if hasattr(Image, 'Resampling') else Image.LANCZOS)
invalid_filename_chars = '<>:"/\|?*\n'
config_filename = "config.json"
parser = argparse.ArgumentParser()
parser.add_argument("--config", type=str, default="configs/stable-diffusion/v1-inference.yaml", help="path to config which constructs model",)
parser.add_argument("--ckpt", type=str, default="models/ldm/stable-diffusion-v1/model.ckpt", help="path to checkpoint of model",)
parser.add_argument("--gfpgan-dir", type=str, help="GFPGAN directory", default=('./src/gfpgan' if os.path.exists('./src/gfpgan') else './GFPGAN')) # i disagree with where you're putting it but since all guidefags are doing it this way, there you go
parser.add_argument("--no-half", action='store_true', help="do not switch the model to 16-bit floats")
parser.add_argument("--no-progressbar-hiding", action='store_true', help="do not hide progressbar in gradio UI (we hide it because it slows down ML if you have hardware accleration in browser)")
parser.add_argument("--max-batch-count", type=int, default=16, help="maximum batch count value for the UI")
parser.add_argument("--embeddings-dir", type=str, default='embeddings', help="embeddings dirtectory for textual inversion (default: embeddings)")
cmd_opts = parser.parse_args()
css_hide_progressbar = """
.wrap .m-12 svg { display:none!important; }
.wrap .m-12::before { content:"Loading..." }
.progress-bar { display:none!important; }
.meta-text { display:none!important; }
"""
SamplerData = namedtuple('SamplerData', ['name', 'constructor'])
samplers = [
*[SamplerData(x[0], lambda m, funcname=x[1]: KDiffusionSampler(m, funcname)) for x in [
('LMS', 'sample_lms'),
('Heun', 'sample_heun'),
('Euler', 'sample_euler'),
('Euler ancestral', 'sample_euler_ancestral'),
('DPM 2', 'sample_dpm_2'),
('DPM 2 Ancestral', 'sample_dpm_2_ancestral'),
] if hasattr(k_diffusion.sampling, x[1])],
SamplerData('DDIM', lambda m: DDIMSampler(model)),
SamplerData('PLMS', lambda m: PLMSSampler(model)),
]
samplers_for_img2img = [x for x in samplers if x.name != 'DDIM' and x.name != 'PLMS']
RealesrganModelInfo = namedtuple("RealesrganModelInfo", ["name", "location", "model", "netscale"])
try:
from basicsr.archs.rrdbnet_arch import RRDBNet
from realesrgan import RealESRGANer
from realesrgan.archs.srvgg_arch import SRVGGNetCompact
realesrgan_models = [
RealesrganModelInfo(
name="Real-ESRGAN 2x plus",
location="https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth",
netscale=2, model=lambda: RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=2)
),
RealesrganModelInfo(
name="Real-ESRGAN 4x plus",
location="https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth",
netscale=4, model=lambda: RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4)
),
RealesrganModelInfo(
name="Real-ESRGAN 4x plus anime 6B",
location="https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth",
netscale=4, model=lambda: RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=6, num_grow_ch=32, scale=4)
),
]
have_realesrgan = True
except:
print("Error loading Real-ESRGAN:", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
realesrgan_models = [RealesrganModelInfo('None', '', 0, None)]
have_realesrgan = False
class Options:
data = None
data_labels = {
"outdir": ("", "Output dictectory; if empty, defaults to 'outputs/*'"),
"samples_save": (True, "Save indiviual samples"),
"samples_format": ('png', 'File format for indiviual samples'),
"grid_save": (True, "Save image grids"),
"grid_format": ('png', 'File format for grids'),
"grid_extended_filename": (False, "Add extended info (seed, prompt) to filename when saving grid"),
"n_rows": (-1, "Grid row count; use -1 for autodetect and 0 for it to be same as batch size", -1, 16),
"jpeg_quality": (80, "Quality for saved jpeg images", 1, 100),
"verify_input": (True, "Check input, and produce warning if it's too long"),
"enable_pnginfo": (True, "Save text information about generation parameters as chunks to png files"),
"prompt_matrix_add_to_start": (True, "In prompt matrix, add the variable combination of text to the start of the prompt, rather than the end"),
}
def __init__(self):
self.data = {k: v[0] for k, v in self.data_labels.items()}
def __setattr__(self, key, value):
if self.data is not None:
if key in self.data:
self.data[key] = value
return super(Options, self).__setattr__(key, value)
def __getattr__(self, item):
if self.data is not None:
if item in self.data:
return self.data[item]
if item in self.data_labels:
return self.data_labels[item][0]
return super(Options, self).__getattribute__(item)
def save(self, filename):
with open(filename, "w", encoding="utf8") as file:
json.dump(self.data, file)
def load(self, filename):
with open(filename, "r", encoding="utf8") as file:
self.data = json.load(file)
def chunk(it, size):
it = iter(it)
return iter(lambda: tuple(islice(it, size)), ())
def load_model_from_config(config, ckpt, verbose=False):
print(f"Loading model from {ckpt}")
pl_sd = torch.load(ckpt, map_location="cpu")
if "global_step" in pl_sd:
print(f"Global Step: {pl_sd['global_step']}")
sd = pl_sd["state_dict"]
model = instantiate_from_config(config.model)
m, u = model.load_state_dict(sd, strict=False)
if len(m) > 0 and verbose:
print("missing keys:")
print(m)
if len(u) > 0 and verbose:
print("unexpected keys:")
print(u)
model.cuda()
model.eval()
return model
class CFGDenoiser(nn.Module):
def __init__(self, model):
super().__init__()
self.inner_model = model
def forward(self, x, sigma, uncond, cond, cond_scale):
x_in = torch.cat([x] * 2)
sigma_in = torch.cat([sigma] * 2)
cond_in = torch.cat([uncond, cond])
uncond, cond = self.inner_model(x_in, sigma_in, cond=cond_in).chunk(2)
return uncond + (cond - uncond) * cond_scale
class KDiffusionSampler:
def __init__(self, m, funcname):
self.model = m
self.model_wrap = k_diffusion.external.CompVisDenoiser(m)
self.funcname = funcname
self.func = getattr(k_diffusion.sampling, self.funcname)
def sample(self, S, conditioning, batch_size, shape, verbose, unconditional_guidance_scale, unconditional_conditioning, eta, x_T):
sigmas = self.model_wrap.get_sigmas(S)
x = x_T * sigmas[0]
model_wrap_cfg = CFGDenoiser(self.model_wrap)
samples_ddim = self.func(model_wrap_cfg, x, sigmas, extra_args={'cond': conditioning, 'uncond': unconditional_conditioning, 'cond_scale': unconditional_guidance_scale}, disable=False)
return samples_ddim, None
def create_random_tensors(shape, seeds):
xs = []
for seed in seeds:
torch.manual_seed(seed)
# randn results depend on device; gpu and cpu get different results for same seed;
# the way I see it, it's better to do this on CPU, so that everyone gets same result;
# but the original script had it like this so i do not dare change it for now because
# it will break everyone's seeds.
xs.append(torch.randn(shape, device=device))
x = torch.stack(xs)
return x
def torch_gc():
torch.cuda.empty_cache()
torch.cuda.ipc_collect()
def save_image(image, path, basename, seed, prompt, extension, info=None, short_filename=False):
prompt = sanitize_filename_part(prompt)
if short_filename:
filename = f"{basename}.{extension}"
else:
filename = f"{basename}-{seed}-{prompt[:128]}.{extension}"
if extension == 'png' and opts.enable_pnginfo and info is not None:
pnginfo = PngImagePlugin.PngInfo()
pnginfo.add_text("parameters", info)
else:
pnginfo = None
image.save(os.path.join(path, filename), quality=opts.jpeg_quality, pnginfo=pnginfo)
def sanitize_filename_part(text):
return text.replace(' ', '_').translate({ord(x): '' for x in invalid_filename_chars})[:128]
def plaintext_to_html(text):
text = "".join([f"<p>{html.escape(x)}</p>\n" for x in text.split('\n')])
return text
def load_GFPGAN():
model_name = 'GFPGANv1.3'
model_path = os.path.join(cmd_opts.gfpgan_dir, 'experiments/pretrained_models', model_name + '.pth')
if not os.path.isfile(model_path):
raise Exception("GFPGAN model not found at path "+model_path)
sys.path.append(os.path.abspath(cmd_opts.gfpgan_dir))
from gfpgan import GFPGANer
return GFPGANer(model_path=model_path, upscale=1, arch='clean', channel_multiplier=2, bg_upsampler=None)
def image_grid(imgs, batch_size, force_n_rows=None):
if force_n_rows is not None:
rows = force_n_rows
elif opts.n_rows > 0:
rows = opts.n_rows
elif opts.n_rows == 0:
rows = batch_size
else:
rows = math.sqrt(len(imgs))
rows = round(rows)
cols = math.ceil(len(imgs) / rows)
w, h = imgs[0].size
grid = Image.new('RGB', size=(cols * w, rows * h), color='black')
for i, img in enumerate(imgs):
grid.paste(img, box=(i % cols * w, i // cols * h))
return grid
def draw_prompt_matrix(im, width, height, all_prompts):
def wrap(text, d, font, line_length):
lines = ['']
for word in text.split():
line = f'{lines[-1]} {word}'.strip()
if d.textlength(line, font=font) <= line_length:
lines[-1] = line
else:
lines.append(word)
return '\n'.join(lines)
def draw_texts(pos, x, y, texts, sizes):
for i, (text, size) in enumerate(zip(texts, sizes)):
active = pos & (1 << i) != 0
if not active:
text = '\u0336'.join(text) + '\u0336'
d.multiline_text((x, y + size[1] / 2), text, font=fnt, fill=color_active if active else color_inactive, anchor="mm", align="center")
y += size[1] + line_spacing
fontsize = (width + height) // 25
line_spacing = fontsize // 2
fnt = ImageFont.truetype("arial.ttf", fontsize)
color_active = (0, 0, 0)
color_inactive = (153, 153, 153)
pad_top = height // 4
pad_left = width * 3 // 4 if len(all_prompts) > 2 else 0
cols = im.width // width
rows = im.height // height
prompts = all_prompts[1:]
result = Image.new("RGB", (im.width + pad_left, im.height + pad_top), "white")
result.paste(im, (pad_left, pad_top))
d = ImageDraw.Draw(result)
boundary = math.ceil(len(prompts) / 2)
prompts_horiz = [wrap(x, d, fnt, width) for x in prompts[:boundary]]
prompts_vert = [wrap(x, d, fnt, pad_left) for x in prompts[boundary:]]
sizes_hor = [(x[2] - x[0], x[3] - x[1]) for x in [d.multiline_textbbox((0, 0), x, font=fnt) for x in prompts_horiz]]
sizes_ver = [(x[2] - x[0], x[3] - x[1]) for x in [d.multiline_textbbox((0, 0), x, font=fnt) for x in prompts_vert]]
hor_text_height = sum([x[1] + line_spacing for x in sizes_hor]) - line_spacing
ver_text_height = sum([x[1] + line_spacing for x in sizes_ver]) - line_spacing
for col in range(cols):
x = pad_left + width * col + width / 2
y = pad_top / 2 - hor_text_height / 2
draw_texts(col, x, y, prompts_horiz, sizes_hor)
for row in range(rows):
x = pad_left / 2
y = pad_top + height * row + height / 2 - ver_text_height / 2
draw_texts(row, x, y, prompts_vert, sizes_ver)
return result
def resize_image(resize_mode, im, width, height):
if resize_mode == 0:
res = im.resize((width, height), resample=LANCZOS)
elif resize_mode == 1:
ratio = width / height
src_ratio = im.width / im.height
src_w = width if ratio > src_ratio else im.width * height // im.height
src_h = height if ratio <= src_ratio else im.height * width // im.width
resized = im.resize((src_w, src_h), resample=LANCZOS)
res = Image.new("RGB", (width, height))
res.paste(resized, box=(width // 2 - src_w // 2, height // 2 - src_h // 2))
else:
ratio = width / height
src_ratio = im.width / im.height
src_w = width if ratio < src_ratio else im.width * height // im.height
src_h = height if ratio >= src_ratio else im.height * width // im.width
resized = im.resize((src_w, src_h), resample=LANCZOS)
res = Image.new("RGB", (width, height))
res.paste(resized, box=(width // 2 - src_w // 2, height // 2 - src_h // 2))
if ratio < src_ratio:
fill_height = height // 2 - src_h // 2
res.paste(resized.resize((width, fill_height), box=(0, 0, width, 0)), box=(0, 0))
res.paste(resized.resize((width, fill_height), box=(0, resized.height, width, resized.height)), box=(0, fill_height + src_h))
elif ratio > src_ratio:
fill_width = width // 2 - src_w // 2
res.paste(resized.resize((fill_width, height), box=(0, 0, 0, height)), box=(0, 0))
res.paste(resized.resize((fill_width, height), box=(resized.width, 0, resized.width, height)), box=(fill_width + src_w, 0))
return res
def check_prompt_length(prompt, comments):
"""this function tests if prompt is too long, and if so, adds a message to comments"""
tokenizer = model.cond_stage_model.tokenizer
max_length = model.cond_stage_model.max_length
info = model.cond_stage_model.tokenizer([prompt], truncation=True, max_length=max_length, return_overflowing_tokens=True, padding="max_length", return_tensors="pt")
ovf = info['overflowing_tokens'][0]
overflowing_count = ovf.shape[0]
if overflowing_count == 0:
return
vocab = {v: k for k, v in tokenizer.get_vocab().items()}
overflowing_words = [vocab.get(int(x), "") for x in ovf]
overflowing_text = tokenizer.convert_tokens_to_string(''.join(overflowing_words))
comments.append(f"Warning: too many input tokens; some ({len(overflowing_words)}) have been truncated:\n{overflowing_text}\n")
def wrap_gradio_call(func):
def f(*p1, **p2):
t = time.perf_counter()
res = list(func(*p1, **p2))
elapsed = time.perf_counter() - t
# last item is always HTML
res[-1] = res[-1] + f"<p class='performance'>Time taken: {elapsed:.2f}s</p>"
return tuple(res)
return f
GFPGAN = None
if os.path.exists(cmd_opts.gfpgan_dir):
try:
GFPGAN = load_GFPGAN()
print("Loaded GFPGAN")
except Exception:
print("Error loading GFPGAN:", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
class TextInversionEmbeddings:
ids_lookup = {}
word_embeddings = {}
word_embeddings_checksums = {}
fixes = []
used_custom_terms = []
dir_mtime = None
def load(self, dir, model):
mt = os.path.getmtime(dir)
if self.dir_mtime is not None and mt <= self.dir_mtime:
return
self.dir_mtime = mt
self.ids_lookup.clear()
self.word_embeddings.clear()
tokenizer = model.cond_stage_model.tokenizer
def const_hash(a):
r = 0
for v in a:
r = (r * 281 ^ int(v) * 997) & 0xFFFFFFFF
return r
def process_file(path, filename):
name = os.path.splitext(filename)[0]
data = torch.load(path)
param_dict = data['string_to_param']
assert len(param_dict) == 1, 'embedding file has multiple terms in it'
emb = next(iter(param_dict.items()))[1].reshape(768)
self.word_embeddings[name] = emb
self.word_embeddings_checksums[name] = f'{const_hash(emb)&0xffff:04x}'
ids = tokenizer([name], add_special_tokens=False)['input_ids'][0]
first_id = ids[0]
if first_id not in self.ids_lookup:
self.ids_lookup[first_id] = []
self.ids_lookup[first_id].append((ids, name))
for fn in os.listdir(dir):
try:
process_file(os.path.join(dir, fn), fn)
except:
print(f"Error loading emedding {fn}:", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
continue
print(f"Loaded a total of {len(self.word_embeddings)} text inversion embeddings.")
def hijack(self, m):
model_embeddings = m.cond_stage_model.transformer.text_model.embeddings
model_embeddings.token_embedding = EmbeddingsWithFixes(model_embeddings.token_embedding, self)
m.cond_stage_model = FrozenCLIPEmbedderWithCustomWords(m.cond_stage_model, self)
class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module):
def __init__(self, wrapped, embeddings):
super().__init__()
self.wrapped = wrapped
self.embeddings = embeddings
self.tokenizer = wrapped.tokenizer
self.max_length = wrapped.max_length
def forward(self, text):
self.embeddings.fixes = []
self.embeddings.used_custom_terms = []
remade_batch_tokens = []
id_start = self.wrapped.tokenizer.bos_token_id
id_end = self.wrapped.tokenizer.eos_token_id
maxlen = self.wrapped.max_length - 2
cache = {}
batch_tokens = self.wrapped.tokenizer(text, truncation=False, add_special_tokens=False)["input_ids"]
for tokens in batch_tokens:
tuple_tokens = tuple(tokens)
if tuple_tokens in cache:
remade_tokens, fixes = cache[tuple_tokens]
else:
fixes = []
remade_tokens = []
i = 0
while i < len(tokens):
token = tokens[i]
possible_matches = self.embeddings.ids_lookup.get(token, None)
if possible_matches is None:
remade_tokens.append(token)
else:
found = False
for ids, word in possible_matches:
if tokens[i:i+len(ids)] == ids:
fixes.append((len(remade_tokens), word))
remade_tokens.append(777)
i += len(ids) - 1
found = True
self.embeddings.used_custom_terms.append((word, self.embeddings.word_embeddings_checksums[word]))
break
if not found:
remade_tokens.append(token)
i += 1
remade_tokens = remade_tokens + [id_end] * (maxlen - 2 - len(remade_tokens))
remade_tokens = [id_start] + remade_tokens[0:maxlen-2] + [id_end]
cache[tuple_tokens] = (remade_tokens, fixes)
remade_batch_tokens.append(remade_tokens)
self.embeddings.fixes.append(fixes)
tokens = torch.asarray(remade_batch_tokens).to(self.wrapped.device)
outputs = self.wrapped.transformer(input_ids=tokens)
z = outputs.last_hidden_state
return z
class EmbeddingsWithFixes(nn.Module):
def __init__(self, wrapped, embeddings):
super().__init__()
self.wrapped = wrapped
self.embeddings = embeddings
def forward(self, input_ids):
batch_fixes = self.embeddings.fixes
self.embeddings.fixes = []
inputs_embeds = self.wrapped(input_ids)
for fixes, tensor in zip(batch_fixes, inputs_embeds):
for offset, word in fixes:
tensor[offset] = self.embeddings.word_embeddings[word]
return inputs_embeds
def get_learned_conditioning_with_embeddings(model, prompts):
if os.path.exists(cmd_opts.embeddings_dir):
text_inversion_embeddings.load(cmd_opts.embeddings_dir, model)
return model.get_learned_conditioning(prompts)
def process_images(outpath, func_init, func_sample, prompt, seed, sampler_index, batch_size, n_iter, steps, cfg_scale, width, height, prompt_matrix, use_GFPGAN, do_not_save_grid=False, extra_generation_params=None):
"""this is the main loop that both txt2img and img2img use; it calls func_init once inside all the scopes and func_sample once per batch"""
assert prompt is not None
torch_gc()
if seed == -1:
seed = random.randrange(4294967294)
seed = int(seed)
os.makedirs(outpath, exist_ok=True)
sample_path = os.path.join(outpath, "samples")
os.makedirs(sample_path, exist_ok=True)
base_count = len(os.listdir(sample_path))
grid_count = len(os.listdir(outpath)) - 1
comments = []
prompt_matrix_parts = []
if prompt_matrix:
all_prompts = []
prompt_matrix_parts = prompt.split("|")
combination_count = 2 ** (len(prompt_matrix_parts) - 1)
for combination_num in range(combination_count):
selected_prompts = [text.strip().strip(',') for n, text in enumerate(prompt_matrix_parts[1:]) if combination_num & (1<<n)]
if opts.prompt_matrix_add_to_start:
selected_prompts = selected_prompts + [prompt_matrix_parts[0]]
else:
selected_prompts = [prompt_matrix_parts[0]] + selected_prompts
all_prompts.append( ", ".join(selected_prompts))
n_iter = math.ceil(len(all_prompts) / batch_size)
all_seeds = len(all_prompts) * [seed]
print(f"Prompt matrix will create {len(all_prompts)} images using a total of {n_iter} batches.")
else:
if opts.verify_input:
try:
check_prompt_length(prompt, comments)
except:
import traceback
print("Error verifying input:", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
all_prompts = batch_size * n_iter * [prompt]
all_seeds = [seed + x for x in range(len(all_prompts))]
generation_params = {
"Steps": steps,
"Sampler": samplers[sampler_index].name,
"CFG scale": cfg_scale,
"Seed": seed,
"GFPGAN": ("GFPGAN" if use_GFPGAN and GFPGAN is not None else None)
}
if extra_generation_params is not None:
generation_params.update(extra_generation_params)
generation_params_text = ", ".join([k if k == v else f'{k}: {v}' for k, v in generation_params.items() if v is not None])
def infotext():
return f"{prompt}\n{generation_params_text}".strip() + "".join(["\n\n" + x for x in comments])
if os.path.exists(cmd_opts.embeddings_dir):
text_inversion_embeddings.load(cmd_opts.embeddings_dir, model)
output_images = []
with torch.no_grad(), autocast("cuda"), model.ema_scope():
init_data = func_init()
for n in range(n_iter):
prompts = all_prompts[n * batch_size:(n + 1) * batch_size]
seeds = all_seeds[n * batch_size:(n + 1) * batch_size]
uc = model.get_learned_conditioning(len(prompts) * [""])
c = model.get_learned_conditioning(prompts)
if len(text_inversion_embeddings.used_custom_terms) > 0:
comments.append("Used custom terms: " + ", ".join([f'{word} [{checksum}]' for word, checksum in text_inversion_embeddings.used_custom_terms]))
# we manually generate all input noises because each one should have a specific seed
x = create_random_tensors([opt_C, height // opt_f, width // opt_f], seeds=seeds)
samples_ddim = func_sample(init_data=init_data, x=x, conditioning=c, unconditional_conditioning=uc)
x_samples_ddim = model.decode_first_stage(samples_ddim)
x_samples_ddim = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)
if prompt_matrix or opts.samples_save or opts.grid_save:
for i, x_sample in enumerate(x_samples_ddim):
x_sample = 255. * rearrange(x_sample.cpu().numpy(), 'c h w -> h w c')
x_sample = x_sample.astype(np.uint8)
if use_GFPGAN and GFPGAN is not None:
cropped_faces, restored_faces, restored_img = GFPGAN.enhance(x_sample, has_aligned=False, only_center_face=False, paste_back=True)
x_sample = restored_img
image = Image.fromarray(x_sample)
save_image(image, sample_path, f"{base_count:05}", seeds[i], prompts[i], opts.samples_format, info=infotext())
output_images.append(image)
base_count += 1
if (prompt_matrix or opts.grid_save) and not do_not_save_grid:
if prompt_matrix:
grid = image_grid(output_images, batch_size, force_n_rows=1 << ((len(prompt_matrix_parts)-1)//2))
try:
grid = draw_prompt_matrix(grid, width, height, prompt_matrix_parts)
except:
import traceback
print("Error creating prompt_matrix text:", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
output_images.insert(0, grid)
else:
grid = image_grid(output_images, batch_size)
save_image(grid, outpath, f"grid-{grid_count:04}", seed, prompt, opts.grid_format, info=infotext(), short_filename=not opts.grid_extended_filename)
grid_count += 1
torch_gc()
return output_images, seed, infotext()
def txt2img(prompt: str, ddim_steps: int, sampler_index: int, use_GFPGAN: bool, prompt_matrix: bool, ddim_eta: float, n_iter: int, batch_size: int, cfg_scale: float, seed: int, height: int, width: int):
outpath = opts.outdir or "outputs/txt2img-samples"
sampler = samplers[sampler_index].constructor(model)
def init():
pass
def sample(init_data, x, conditioning, unconditional_conditioning):
samples_ddim, _ = sampler.sample(S=ddim_steps, conditioning=conditioning, batch_size=int(x.shape[0]), shape=x[0].shape, verbose=False, unconditional_guidance_scale=cfg_scale, unconditional_conditioning=unconditional_conditioning, eta=ddim_eta, x_T=x)
return samples_ddim
output_images, seed, info = process_images(
outpath=outpath,
func_init=init,
func_sample=sample,
prompt=prompt,
seed=seed,
sampler_index=sampler_index,
batch_size=batch_size,
n_iter=n_iter,
steps=ddim_steps,
cfg_scale=cfg_scale,
width=width,
height=height,
prompt_matrix=prompt_matrix,
use_GFPGAN=use_GFPGAN
)
del sampler
return output_images, seed, plaintext_to_html(info)
class Flagging(gr.FlaggingCallback):
def setup(self, components, flagging_dir: str):
pass
def flag(self, flag_data, flag_option=None, flag_index=None, username=None):
import csv
os.makedirs("log/images", exist_ok=True)
# those must match the "txt2img" function
prompt, ddim_steps, sampler_name, use_GFPGAN, prompt_matrix, ddim_eta, n_iter, n_samples, cfg_scale, request_seed, height, width, images, seed, comment = flag_data
filenames = []
with open("log/log.csv", "a", encoding="utf8", newline='') as file:
import time
import base64
at_start = file.tell() == 0
writer = csv.writer(file)
if at_start:
writer.writerow(["prompt", "seed", "width", "height", "cfgs", "steps", "filename"])
filename_base = str(int(time.time() * 1000))
for i, filedata in enumerate(images):
filename = "log/images/"+filename_base + ("" if len(images) == 1 else "-"+str(i+1)) + ".png"
if filedata.startswith("data:image/png;base64,"):
filedata = filedata[len("data:image/png;base64,"):]
with open(filename, "wb") as imgfile:
imgfile.write(base64.decodebytes(filedata.encode('utf-8')))
filenames.append(filename)
writer.writerow([prompt, seed, width, height, cfg_scale, ddim_steps, filenames[0]])
print("Logged:", filenames[0])
txt2img_interface = gr.Interface(
wrap_gradio_call(txt2img),
inputs=[
gr.Textbox(label="Prompt", placeholder="A corgi wearing a top hat as an oil painting.", lines=1),
gr.Slider(minimum=1, maximum=150, step=1, label="Sampling Steps", value=50),
gr.Radio(label='Sampling method', choices=[x.name for x in samplers], value=samplers[0].name, type="index"),
gr.Checkbox(label='Fix faces using GFPGAN', value=False, visible=GFPGAN is not None),
gr.Checkbox(label='Create prompt matrix (separate multiple prompts using |, and get all combinations of them)', value=False),
gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label="DDIM ETA", value=0.0, visible=False),
gr.Slider(minimum=1, maximum=cmd_opts.max_batch_count, step=1, label='Batch count (how many batches of images to generate)', value=1),
gr.Slider(minimum=1, maximum=8, step=1, label='Batch size (how many images are in a batch; memory-hungry)', value=1),
gr.Slider(minimum=1.0, maximum=15.0, step=0.5, label='Classifier Free Guidance Scale (how strongly the image should follow the prompt)', value=7.0),
gr.Number(label='Seed', value=-1),
gr.Slider(minimum=64, maximum=2048, step=64, label="Height", value=512),
gr.Slider(minimum=64, maximum=2048, step=64, label="Width", value=512),
],
outputs=[
gr.Gallery(label="Images"),
gr.Number(label='Seed'),
gr.HTML(),
],
title="Stable Diffusion Text-to-Image",
flagging_callback=Flagging()
)
def img2img(prompt: str, init_img, ddim_steps: int, sampler_index: int, use_GFPGAN: bool, prompt_matrix, loopback: bool, n_iter: int, batch_size: int, cfg_scale: float, denoising_strength: float, seed: int, height: int, width: int, resize_mode: int):
outpath = opts.outdir or "outputs/img2img-samples"
sampler = samplers_for_img2img[sampler_index].constructor(model)
assert 0. <= denoising_strength <= 1., 'can only work with strength in [0.0, 1.0]'
def init():
image = init_img.convert("RGB")
image = resize_image(resize_mode, image, width, height)
image = np.array(image).astype(np.float32) / 255.0
image = image[None].transpose(0, 3, 1, 2)
image = torch.from_numpy(image)
init_image = 2. * image - 1.
init_image = init_image.to(device)
init_image = repeat(init_image, '1 ... -> b ...', b=batch_size)
init_latent = model.get_first_stage_encoding(model.encode_first_stage(init_image)) # move to latent space
return init_latent,
def sample(init_data, x, conditioning, unconditional_conditioning):
t_enc = int(denoising_strength * ddim_steps)
x0, = init_data
sigmas = sampler.model_wrap.get_sigmas(ddim_steps)
noise = x * sigmas[ddim_steps - t_enc - 1]
xi = x0 + noise
sigma_sched = sigmas[ddim_steps - t_enc - 1:]
model_wrap_cfg = CFGDenoiser(sampler.model_wrap)
samples_ddim = sampler.func(model_wrap_cfg, xi, sigma_sched, extra_args={'cond': conditioning, 'uncond': unconditional_conditioning, 'cond_scale': cfg_scale}, disable=False)
return samples_ddim
if loopback:
output_images, info = None, None
history = []
initial_seed = None
for i in range(n_iter):
output_images, seed, info = process_images(
outpath=outpath,
func_init=init,
func_sample=sample,
prompt=prompt,
seed=seed,
sampler_index=0,
batch_size=1,
n_iter=1,
steps=ddim_steps,
cfg_scale=cfg_scale,
width=width,
height=height,
prompt_matrix=prompt_matrix,
use_GFPGAN=use_GFPGAN,
do_not_save_grid=True,
extra_generation_params={"Denoising Strength": denoising_strength},
)
if initial_seed is None:
initial_seed = seed
init_img = output_images[0]
seed = seed + 1
denoising_strength = max(denoising_strength * 0.95, 0.1)
history.append(init_img)
grid_count = len(os.listdir(outpath)) - 1
grid = image_grid(history, batch_size, force_n_rows=1)
save_image(grid, outpath, f"grid-{grid_count:04}", initial_seed, prompt, opts.grid_format, info=info, short_filename=not opts.grid_extended_filename)
output_images = history
seed = initial_seed
else:
output_images, seed, info = process_images(
outpath=outpath,
func_init=init,
func_sample=sample,
prompt=prompt,
seed=seed,
sampler_index=0,
batch_size=batch_size,
n_iter=n_iter,
steps=ddim_steps,
cfg_scale=cfg_scale,
width=width,
height=height,
prompt_matrix=prompt_matrix,
use_GFPGAN=use_GFPGAN,
extra_generation_params={"Denoising Strength": denoising_strength},
)
del sampler
return output_images, seed, plaintext_to_html(info)
sample_img2img = "assets/stable-samples/img2img/sketch-mountains-input.jpg"
sample_img2img = sample_img2img if os.path.exists(sample_img2img) else None
img2img_interface = gr.Interface(
wrap_gradio_call(img2img),
inputs=[
gr.Textbox(placeholder="A fantasy landscape, trending on artstation.", lines=1),
gr.Image(value=sample_img2img, source="upload", interactive=True, type="pil"),
gr.Slider(minimum=1, maximum=150, step=1, label="Sampling Steps", value=50),
gr.Radio(label='Sampling method', choices=[x.name for x in samplers_for_img2img], value=samplers_for_img2img[0].name, type="index"),
gr.Checkbox(label='Fix faces using GFPGAN', value=False, visible=GFPGAN is not None),
gr.Checkbox(label='Create prompt matrix (separate multiple prompts using |, and get all combinations of them)', value=False),
gr.Checkbox(label='Loopback (use images from previous batch when creating next batch)', value=False),
gr.Slider(minimum=1, maximum=cmd_opts.max_batch_count, step=1, label='Batch count (how many batches of images to generate)', value=1),
gr.Slider(minimum=1, maximum=8, step=1, label='Batch size (how many images are in a batch; memory-hungry)', value=1),
gr.Slider(minimum=1.0, maximum=15.0, step=0.5, label='Classifier Free Guidance Scale (how strongly the image should follow the prompt)', value=7.0),
gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising Strength', value=0.75),
gr.Number(label='Seed', value=-1),
gr.Slider(minimum=64, maximum=2048, step=64, label="Height", value=512),
gr.Slider(minimum=64, maximum=2048, step=64, label="Width", value=512),
gr.Radio(label="Resize mode", choices=["Just resize", "Crop and resize", "Resize and fill"], type="index", value="Just resize")
],
outputs=[
gr.Gallery(),
gr.Number(label='Seed'),
gr.HTML(),
],
allow_flagging="never",
)
def run_extras(image, GFPGAN_strength, RealESRGAN_upscaling, RealESRGAN_model_index):
image = image.convert("RGB")
outpath = opts.outdir or "outputs/extras-samples"
if GFPGAN is not None and GFPGAN_strength > 0:
cropped_faces, restored_faces, restored_img = GFPGAN.enhance(np.array(image, dtype=np.uint8), has_aligned=False, only_center_face=False, paste_back=True)
res = Image.fromarray(restored_img)
if GFPGAN_strength < 1.0:
res = Image.blend(image, res, GFPGAN_strength)
image = res
if have_realesrgan and RealESRGAN_upscaling != 1.0:
info = realesrgan_models[RealESRGAN_model_index]
model = info.model()
upsampler = RealESRGANer(
scale=info.netscale,
model_path=info.location,
model=model,
half=True
)
upsampled = upsampler.enhance(np.array(image), outscale=RealESRGAN_upscaling)[0]
image = Image.fromarray(upsampled)
os.makedirs(outpath, exist_ok=True)
base_count = len(os.listdir(outpath))
save_image(image, outpath, f"{base_count:05}", None, '', opts.samples_format, short_filename=True)
return image, 0, ''
extras_interface = gr.Interface(
wrap_gradio_call(run_extras),
inputs=[
gr.Image(label="Source", source="upload", interactive=True, type="pil"),
gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="GFPGAN strength", value=1, interactive=GFPGAN is not None),
gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Real-ESRGAN upscaling", value=2, interactive=have_realesrgan),
gr.Radio(label='Real-ESRGAN model', choices=[x.name for x in realesrgan_models], value=realesrgan_models[0].name, type="index", interactive=have_realesrgan),
],
outputs=[
gr.Image(label="Result"),
gr.Number(label='Seed', visible=False),
gr.HTML(),
],
allow_flagging="never",
)
opts = Options()
if os.path.exists(config_filename):
opts.load(config_filename)
def run_settings(*args):
up = []
for key, value, comp in zip(opts.data_labels.keys(), args, settings_interface.input_components):
opts.data[key] = value
up.append(comp.update(value=value))
opts.save(config_filename)
return 'Settings saved.', ''
def create_setting_component(key):
def fun():
return opts.data[key] if key in opts.data else opts.data_labels[key][0]
labelinfo = opts.data_labels[key]
t = type(labelinfo[0])
label = labelinfo[1]
if t == str:
item = gr.Textbox(label=label, value=fun, lines=1)
elif t == int:
if len(labelinfo) == 4:
item = gr.Slider(minimum=labelinfo[2], maximum=labelinfo[3], step=1, label=label, value=fun)
else:
item = gr.Number(label=label, value=fun)
elif t == bool:
item = gr.Checkbox(label=label, value=fun)
else:
raise Exception(f'bad options item type: {str(t)} for key {key}')
return item
settings_interface = gr.Interface(
run_settings,
inputs=[create_setting_component(key) for key in opts.data_labels.keys()],
outputs=[
gr.Textbox(label='Result'),
gr.HTML(),
],
title=None,
description=None,
allow_flagging="never",
)
interfaces = [
(txt2img_interface, "txt2img"),
(img2img_interface, "img2img"),
(extras_interface, "Extras"),
(settings_interface, "Settings"),
]
config = OmegaConf.load(cmd_opts.config)
model = load_model_from_config(config, cmd_opts.ckpt)
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
model = (model if cmd_opts.no_half else model.half()).to(device)
text_inversion_embeddings = TextInversionEmbeddings()
if os.path.exists(cmd_opts.embeddings_dir):
text_inversion_embeddings.hijack(model)
demo = gr.TabbedInterface(
interface_list=[x[0] for x in interfaces],
tab_names=[x[1] for x in interfaces],
css=("" if cmd_opts.no_progressbar_hiding else css_hide_progressbar) + """
.output-html p {margin: 0 0.5em;}
.performance { font-size: 0.85em; color: #444; }
"""
)
demo.launch()
<|code_end|>
|
webui.py
<|code_start|>import argparse, os, sys, glob
from collections import namedtuple
import torch
import torch.nn as nn
import numpy as np
import gradio as gr
from omegaconf import OmegaConf
from PIL import Image, ImageFont, ImageDraw, PngImagePlugin
from itertools import islice
from einops import rearrange, repeat
from torch import autocast
import mimetypes
import random
import math
import html
import time
import json
import traceback
import k_diffusion.sampling
from ldm.util import instantiate_from_config
from ldm.models.diffusion.ddim import DDIMSampler
from ldm.models.diffusion.plms import PLMSSampler
import ldm.modules.encoders.modules
try:
# this silences the annoying "Some weights of the model checkpoint were not used when initializing..." message at start.
from transformers import logging
logging.set_verbosity_error()
except:
pass
# this is a fix for Windows users. Without it, javascript files will be served with text/html content-type and the browser will not show any UI
mimetypes.init()
mimetypes.add_type('application/javascript', '.js')
# some of those options should not be changed at all because they would break the model, so I removed them from options.
opt_C = 4
opt_f = 8
LANCZOS = (Image.Resampling.LANCZOS if hasattr(Image, 'Resampling') else Image.LANCZOS)
invalid_filename_chars = '<>:"/\|?*\n'
config_filename = "config.json"
parser = argparse.ArgumentParser()
parser.add_argument("--config", type=str, default="configs/stable-diffusion/v1-inference.yaml", help="path to config which constructs model",)
parser.add_argument("--ckpt", type=str, default="models/ldm/stable-diffusion-v1/model.ckpt", help="path to checkpoint of model",)
parser.add_argument("--gfpgan-dir", type=str, help="GFPGAN directory", default=('./src/gfpgan' if os.path.exists('./src/gfpgan') else './GFPGAN')) # i disagree with where you're putting it but since all guidefags are doing it this way, there you go
parser.add_argument("--no-half", action='store_true', help="do not switch the model to 16-bit floats")
parser.add_argument("--no-progressbar-hiding", action='store_true', help="do not hide progressbar in gradio UI (we hide it because it slows down ML if you have hardware accleration in browser)")
parser.add_argument("--max-batch-count", type=int, default=16, help="maximum batch count value for the UI")
parser.add_argument("--embeddings-dir", type=str, default='embeddings', help="embeddings dirtectory for textual inversion (default: embeddings)")
cmd_opts = parser.parse_args()
css_hide_progressbar = """
.wrap .m-12 svg { display:none!important; }
.wrap .m-12::before { content:"Loading..." }
.progress-bar { display:none!important; }
.meta-text { display:none!important; }
"""
SamplerData = namedtuple('SamplerData', ['name', 'constructor'])
samplers = [
*[SamplerData(x[0], lambda m, funcname=x[1]: KDiffusionSampler(m, funcname)) for x in [
('LMS', 'sample_lms'),
('Heun', 'sample_heun'),
('Euler', 'sample_euler'),
('Euler ancestral', 'sample_euler_ancestral'),
('DPM 2', 'sample_dpm_2'),
('DPM 2 Ancestral', 'sample_dpm_2_ancestral'),
] if hasattr(k_diffusion.sampling, x[1])],
SamplerData('DDIM', lambda m: DDIMSampler(model)),
SamplerData('PLMS', lambda m: PLMSSampler(model)),
]
samplers_for_img2img = [x for x in samplers if x.name != 'DDIM' and x.name != 'PLMS']
RealesrganModelInfo = namedtuple("RealesrganModelInfo", ["name", "location", "model", "netscale"])
try:
from basicsr.archs.rrdbnet_arch import RRDBNet
from realesrgan import RealESRGANer
from realesrgan.archs.srvgg_arch import SRVGGNetCompact
realesrgan_models = [
RealesrganModelInfo(
name="Real-ESRGAN 2x plus",
location="https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth",
netscale=2, model=lambda: RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=2)
),
RealesrganModelInfo(
name="Real-ESRGAN 4x plus",
location="https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth",
netscale=4, model=lambda: RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4)
),
RealesrganModelInfo(
name="Real-ESRGAN 4x plus anime 6B",
location="https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth",
netscale=4, model=lambda: RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=6, num_grow_ch=32, scale=4)
),
]
have_realesrgan = True
except:
print("Error loading Real-ESRGAN:", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
realesrgan_models = [RealesrganModelInfo('None', '', 0, None)]
have_realesrgan = False
class Options:
data = None
data_labels = {
"outdir": ("", "Output dictectory; if empty, defaults to 'outputs/*'"),
"samples_save": (True, "Save indiviual samples"),
"samples_format": ('png', 'File format for indiviual samples'),
"grid_save": (True, "Save image grids"),
"grid_format": ('png', 'File format for grids'),
"grid_extended_filename": (False, "Add extended info (seed, prompt) to filename when saving grid"),
"n_rows": (-1, "Grid row count; use -1 for autodetect and 0 for it to be same as batch size", -1, 16),
"jpeg_quality": (80, "Quality for saved jpeg images", 1, 100),
"verify_input": (True, "Check input, and produce warning if it's too long"),
"enable_pnginfo": (True, "Save text information about generation parameters as chunks to png files"),
"prompt_matrix_add_to_start": (True, "In prompt matrix, add the variable combination of text to the start of the prompt, rather than the end"),
}
def __init__(self):
self.data = {k: v[0] for k, v in self.data_labels.items()}
def __setattr__(self, key, value):
if self.data is not None:
if key in self.data:
self.data[key] = value
return super(Options, self).__setattr__(key, value)
def __getattr__(self, item):
if self.data is not None:
if item in self.data:
return self.data[item]
if item in self.data_labels:
return self.data_labels[item][0]
return super(Options, self).__getattribute__(item)
def save(self, filename):
with open(filename, "w", encoding="utf8") as file:
json.dump(self.data, file)
def load(self, filename):
with open(filename, "r", encoding="utf8") as file:
self.data = json.load(file)
def chunk(it, size):
it = iter(it)
return iter(lambda: tuple(islice(it, size)), ())
def load_model_from_config(config, ckpt, verbose=False):
print(f"Loading model from {ckpt}")
pl_sd = torch.load(ckpt, map_location="cpu")
if "global_step" in pl_sd:
print(f"Global Step: {pl_sd['global_step']}")
sd = pl_sd["state_dict"]
model = instantiate_from_config(config.model)
m, u = model.load_state_dict(sd, strict=False)
if len(m) > 0 and verbose:
print("missing keys:")
print(m)
if len(u) > 0 and verbose:
print("unexpected keys:")
print(u)
model.cuda()
model.eval()
return model
class CFGDenoiser(nn.Module):
def __init__(self, model):
super().__init__()
self.inner_model = model
def forward(self, x, sigma, uncond, cond, cond_scale):
x_in = torch.cat([x] * 2)
sigma_in = torch.cat([sigma] * 2)
cond_in = torch.cat([uncond, cond])
uncond, cond = self.inner_model(x_in, sigma_in, cond=cond_in).chunk(2)
return uncond + (cond - uncond) * cond_scale
class KDiffusionSampler:
def __init__(self, m, funcname):
self.model = m
self.model_wrap = k_diffusion.external.CompVisDenoiser(m)
self.funcname = funcname
self.func = getattr(k_diffusion.sampling, self.funcname)
def sample(self, S, conditioning, batch_size, shape, verbose, unconditional_guidance_scale, unconditional_conditioning, eta, x_T):
sigmas = self.model_wrap.get_sigmas(S)
x = x_T * sigmas[0]
model_wrap_cfg = CFGDenoiser(self.model_wrap)
samples_ddim = self.func(model_wrap_cfg, x, sigmas, extra_args={'cond': conditioning, 'uncond': unconditional_conditioning, 'cond_scale': unconditional_guidance_scale}, disable=False)
return samples_ddim, None
def create_random_tensors(shape, seeds):
xs = []
for seed in seeds:
torch.manual_seed(seed)
# randn results depend on device; gpu and cpu get different results for same seed;
# the way I see it, it's better to do this on CPU, so that everyone gets same result;
# but the original script had it like this so i do not dare change it for now because
# it will break everyone's seeds.
xs.append(torch.randn(shape, device=device))
x = torch.stack(xs)
return x
def torch_gc():
torch.cuda.empty_cache()
torch.cuda.ipc_collect()
def save_image(image, path, basename, seed, prompt, extension, info=None, short_filename=False):
prompt = sanitize_filename_part(prompt)
if short_filename:
filename = f"{basename}.{extension}"
else:
filename = f"{basename}-{seed}-{prompt[:128]}.{extension}"
if extension == 'png' and opts.enable_pnginfo and info is not None:
pnginfo = PngImagePlugin.PngInfo()
pnginfo.add_text("parameters", info)
else:
pnginfo = None
image.save(os.path.join(path, filename), quality=opts.jpeg_quality, pnginfo=pnginfo)
def sanitize_filename_part(text):
return text.replace(' ', '_').translate({ord(x): '' for x in invalid_filename_chars})[:128]
def plaintext_to_html(text):
text = "".join([f"<p>{html.escape(x)}</p>\n" for x in text.split('\n')])
return text
def load_GFPGAN():
model_name = 'GFPGANv1.3'
model_path = os.path.join(cmd_opts.gfpgan_dir, 'experiments/pretrained_models', model_name + '.pth')
if not os.path.isfile(model_path):
raise Exception("GFPGAN model not found at path "+model_path)
sys.path.append(os.path.abspath(cmd_opts.gfpgan_dir))
from gfpgan import GFPGANer
return GFPGANer(model_path=model_path, upscale=1, arch='clean', channel_multiplier=2, bg_upsampler=None)
def image_grid(imgs, batch_size, force_n_rows=None):
if force_n_rows is not None:
rows = force_n_rows
elif opts.n_rows > 0:
rows = opts.n_rows
elif opts.n_rows == 0:
rows = batch_size
else:
rows = math.sqrt(len(imgs))
rows = round(rows)
cols = math.ceil(len(imgs) / rows)
w, h = imgs[0].size
grid = Image.new('RGB', size=(cols * w, rows * h), color='black')
for i, img in enumerate(imgs):
grid.paste(img, box=(i % cols * w, i // cols * h))
return grid
def draw_prompt_matrix(im, width, height, all_prompts):
def wrap(text, d, font, line_length):
lines = ['']
for word in text.split():
line = f'{lines[-1]} {word}'.strip()
if d.textlength(line, font=font) <= line_length:
lines[-1] = line
else:
lines.append(word)
return '\n'.join(lines)
def draw_texts(pos, x, y, texts, sizes):
for i, (text, size) in enumerate(zip(texts, sizes)):
active = pos & (1 << i) != 0
if not active:
text = '\u0336'.join(text) + '\u0336'
d.multiline_text((x, y + size[1] / 2), text, font=fnt, fill=color_active if active else color_inactive, anchor="mm", align="center")
y += size[1] + line_spacing
fontsize = (width + height) // 25
line_spacing = fontsize // 2
fnt = ImageFont.truetype("arial.ttf", fontsize)
color_active = (0, 0, 0)
color_inactive = (153, 153, 153)
pad_top = height // 4
pad_left = width * 3 // 4 if len(all_prompts) > 2 else 0
cols = im.width // width
rows = im.height // height
prompts = all_prompts[1:]
result = Image.new("RGB", (im.width + pad_left, im.height + pad_top), "white")
result.paste(im, (pad_left, pad_top))
d = ImageDraw.Draw(result)
boundary = math.ceil(len(prompts) / 2)
prompts_horiz = [wrap(x, d, fnt, width) for x in prompts[:boundary]]
prompts_vert = [wrap(x, d, fnt, pad_left) for x in prompts[boundary:]]
sizes_hor = [(x[2] - x[0], x[3] - x[1]) for x in [d.multiline_textbbox((0, 0), x, font=fnt) for x in prompts_horiz]]
sizes_ver = [(x[2] - x[0], x[3] - x[1]) for x in [d.multiline_textbbox((0, 0), x, font=fnt) for x in prompts_vert]]
hor_text_height = sum([x[1] + line_spacing for x in sizes_hor]) - line_spacing
ver_text_height = sum([x[1] + line_spacing for x in sizes_ver]) - line_spacing
for col in range(cols):
x = pad_left + width * col + width / 2
y = pad_top / 2 - hor_text_height / 2
draw_texts(col, x, y, prompts_horiz, sizes_hor)
for row in range(rows):
x = pad_left / 2
y = pad_top + height * row + height / 2 - ver_text_height / 2
draw_texts(row, x, y, prompts_vert, sizes_ver)
return result
def resize_image(resize_mode, im, width, height):
if resize_mode == 0:
res = im.resize((width, height), resample=LANCZOS)
elif resize_mode == 1:
ratio = width / height
src_ratio = im.width / im.height
src_w = width if ratio > src_ratio else im.width * height // im.height
src_h = height if ratio <= src_ratio else im.height * width // im.width
resized = im.resize((src_w, src_h), resample=LANCZOS)
res = Image.new("RGB", (width, height))
res.paste(resized, box=(width // 2 - src_w // 2, height // 2 - src_h // 2))
else:
ratio = width / height
src_ratio = im.width / im.height
src_w = width if ratio < src_ratio else im.width * height // im.height
src_h = height if ratio >= src_ratio else im.height * width // im.width
resized = im.resize((src_w, src_h), resample=LANCZOS)
res = Image.new("RGB", (width, height))
res.paste(resized, box=(width // 2 - src_w // 2, height // 2 - src_h // 2))
if ratio < src_ratio:
fill_height = height // 2 - src_h // 2
res.paste(resized.resize((width, fill_height), box=(0, 0, width, 0)), box=(0, 0))
res.paste(resized.resize((width, fill_height), box=(0, resized.height, width, resized.height)), box=(0, fill_height + src_h))
elif ratio > src_ratio:
fill_width = width // 2 - src_w // 2
res.paste(resized.resize((fill_width, height), box=(0, 0, 0, height)), box=(0, 0))
res.paste(resized.resize((fill_width, height), box=(resized.width, 0, resized.width, height)), box=(fill_width + src_w, 0))
return res
def check_prompt_length(prompt, comments):
"""this function tests if prompt is too long, and if so, adds a message to comments"""
tokenizer = model.cond_stage_model.tokenizer
max_length = model.cond_stage_model.max_length
info = model.cond_stage_model.tokenizer([prompt], truncation=True, max_length=max_length, return_overflowing_tokens=True, padding="max_length", return_tensors="pt")
ovf = info['overflowing_tokens'][0]
overflowing_count = ovf.shape[0]
if overflowing_count == 0:
return
vocab = {v: k for k, v in tokenizer.get_vocab().items()}
overflowing_words = [vocab.get(int(x), "") for x in ovf]
overflowing_text = tokenizer.convert_tokens_to_string(''.join(overflowing_words))
comments.append(f"Warning: too many input tokens; some ({len(overflowing_words)}) have been truncated:\n{overflowing_text}\n")
def wrap_gradio_call(func):
def f(*p1, **p2):
t = time.perf_counter()
res = list(func(*p1, **p2))
elapsed = time.perf_counter() - t
# last item is always HTML
res[-1] = res[-1] + f"<p class='performance'>Time taken: {elapsed:.2f}s</p>"
return tuple(res)
return f
GFPGAN = None
if os.path.exists(cmd_opts.gfpgan_dir):
try:
GFPGAN = load_GFPGAN()
print("Loaded GFPGAN")
except Exception:
print("Error loading GFPGAN:", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
class TextInversionEmbeddings:
ids_lookup = {}
word_embeddings = {}
word_embeddings_checksums = {}
fixes = []
used_custom_terms = []
dir_mtime = None
def load(self, dir, model):
mt = os.path.getmtime(dir)
if self.dir_mtime is not None and mt <= self.dir_mtime:
return
self.dir_mtime = mt
self.ids_lookup.clear()
self.word_embeddings.clear()
tokenizer = model.cond_stage_model.tokenizer
def const_hash(a):
r = 0
for v in a:
r = (r * 281 ^ int(v) * 997) & 0xFFFFFFFF
return r
def process_file(path, filename):
name = os.path.splitext(filename)[0]
data = torch.load(path)
param_dict = data['string_to_param']
assert len(param_dict) == 1, 'embedding file has multiple terms in it'
emb = next(iter(param_dict.items()))[1].reshape(768)
self.word_embeddings[name] = emb
self.word_embeddings_checksums[name] = f'{const_hash(emb)&0xffff:04x}'
ids = tokenizer([name], add_special_tokens=False)['input_ids'][0]
first_id = ids[0]
if first_id not in self.ids_lookup:
self.ids_lookup[first_id] = []
self.ids_lookup[first_id].append((ids, name))
for fn in os.listdir(dir):
try:
process_file(os.path.join(dir, fn), fn)
except:
print(f"Error loading emedding {fn}:", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
continue
print(f"Loaded a total of {len(self.word_embeddings)} text inversion embeddings.")
def hijack(self, m):
model_embeddings = m.cond_stage_model.transformer.text_model.embeddings
model_embeddings.token_embedding = EmbeddingsWithFixes(model_embeddings.token_embedding, self)
m.cond_stage_model = FrozenCLIPEmbedderWithCustomWords(m.cond_stage_model, self)
class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module):
def __init__(self, wrapped, embeddings):
super().__init__()
self.wrapped = wrapped
self.embeddings = embeddings
self.tokenizer = wrapped.tokenizer
self.max_length = wrapped.max_length
def forward(self, text):
self.embeddings.fixes = []
self.embeddings.used_custom_terms = []
remade_batch_tokens = []
id_start = self.wrapped.tokenizer.bos_token_id
id_end = self.wrapped.tokenizer.eos_token_id
maxlen = self.wrapped.max_length - 2
cache = {}
batch_tokens = self.wrapped.tokenizer(text, truncation=False, add_special_tokens=False)["input_ids"]
for tokens in batch_tokens:
tuple_tokens = tuple(tokens)
if tuple_tokens in cache:
remade_tokens, fixes = cache[tuple_tokens]
else:
fixes = []
remade_tokens = []
i = 0
while i < len(tokens):
token = tokens[i]
possible_matches = self.embeddings.ids_lookup.get(token, None)
if possible_matches is None:
remade_tokens.append(token)
else:
found = False
for ids, word in possible_matches:
if tokens[i:i+len(ids)] == ids:
fixes.append((len(remade_tokens), word))
remade_tokens.append(777)
i += len(ids) - 1
found = True
self.embeddings.used_custom_terms.append((word, self.embeddings.word_embeddings_checksums[word]))
break
if not found:
remade_tokens.append(token)
i += 1
remade_tokens = remade_tokens + [id_end] * (maxlen - 2 - len(remade_tokens))
remade_tokens = [id_start] + remade_tokens[0:maxlen-2] + [id_end]
cache[tuple_tokens] = (remade_tokens, fixes)
remade_batch_tokens.append(remade_tokens)
self.embeddings.fixes.append(fixes)
tokens = torch.asarray(remade_batch_tokens).to(self.wrapped.device)
outputs = self.wrapped.transformer(input_ids=tokens)
z = outputs.last_hidden_state
return z
class EmbeddingsWithFixes(nn.Module):
def __init__(self, wrapped, embeddings):
super().__init__()
self.wrapped = wrapped
self.embeddings = embeddings
def forward(self, input_ids):
batch_fixes = self.embeddings.fixes
self.embeddings.fixes = []
inputs_embeds = self.wrapped(input_ids)
for fixes, tensor in zip(batch_fixes, inputs_embeds):
for offset, word in fixes:
tensor[offset] = self.embeddings.word_embeddings[word]
return inputs_embeds
def get_learned_conditioning_with_embeddings(model, prompts):
if os.path.exists(cmd_opts.embeddings_dir):
text_inversion_embeddings.load(cmd_opts.embeddings_dir, model)
return model.get_learned_conditioning(prompts)
def process_images(outpath, func_init, func_sample, prompt, seed, sampler_index, batch_size, n_iter, steps, cfg_scale, width, height, prompt_matrix, use_GFPGAN, do_not_save_grid=False, extra_generation_params=None):
"""this is the main loop that both txt2img and img2img use; it calls func_init once inside all the scopes and func_sample once per batch"""
assert prompt is not None
torch_gc()
if seed == -1:
seed = random.randrange(4294967294)
seed = int(seed)
os.makedirs(outpath, exist_ok=True)
sample_path = os.path.join(outpath, "samples")
os.makedirs(sample_path, exist_ok=True)
base_count = len(os.listdir(sample_path))
grid_count = len(os.listdir(outpath)) - 1
comments = []
prompt_matrix_parts = []
if prompt_matrix:
all_prompts = []
prompt_matrix_parts = prompt.split("|")
combination_count = 2 ** (len(prompt_matrix_parts) - 1)
for combination_num in range(combination_count):
selected_prompts = [text.strip().strip(',') for n, text in enumerate(prompt_matrix_parts[1:]) if combination_num & (1<<n)]
if opts.prompt_matrix_add_to_start:
selected_prompts = selected_prompts + [prompt_matrix_parts[0]]
else:
selected_prompts = [prompt_matrix_parts[0]] + selected_prompts
all_prompts.append( ", ".join(selected_prompts))
n_iter = math.ceil(len(all_prompts) / batch_size)
all_seeds = len(all_prompts) * [seed]
print(f"Prompt matrix will create {len(all_prompts)} images using a total of {n_iter} batches.")
else:
if opts.verify_input:
try:
check_prompt_length(prompt, comments)
except:
import traceback
print("Error verifying input:", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
all_prompts = batch_size * n_iter * [prompt]
all_seeds = [seed + x for x in range(len(all_prompts))]
generation_params = {
"Steps": steps,
"Sampler": samplers[sampler_index].name,
"CFG scale": cfg_scale,
"Seed": seed,
"GFPGAN": ("GFPGAN" if use_GFPGAN and GFPGAN is not None else None)
}
if extra_generation_params is not None:
generation_params.update(extra_generation_params)
generation_params_text = ", ".join([k if k == v else f'{k}: {v}' for k, v in generation_params.items() if v is not None])
def infotext():
return f"{prompt}\n{generation_params_text}".strip() + "".join(["\n\n" + x for x in comments])
if os.path.exists(cmd_opts.embeddings_dir):
text_inversion_embeddings.load(cmd_opts.embeddings_dir, model)
output_images = []
with torch.no_grad(), autocast("cuda"), model.ema_scope():
init_data = func_init()
for n in range(n_iter):
prompts = all_prompts[n * batch_size:(n + 1) * batch_size]
seeds = all_seeds[n * batch_size:(n + 1) * batch_size]
uc = model.get_learned_conditioning(len(prompts) * [""])
c = model.get_learned_conditioning(prompts)
if len(text_inversion_embeddings.used_custom_terms) > 0:
comments.append("Used custom terms: " + ", ".join([f'{word} [{checksum}]' for word, checksum in text_inversion_embeddings.used_custom_terms]))
# we manually generate all input noises because each one should have a specific seed
x = create_random_tensors([opt_C, height // opt_f, width // opt_f], seeds=seeds)
samples_ddim = func_sample(init_data=init_data, x=x, conditioning=c, unconditional_conditioning=uc)
x_samples_ddim = model.decode_first_stage(samples_ddim)
x_samples_ddim = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)
if prompt_matrix or opts.samples_save or opts.grid_save:
for i, x_sample in enumerate(x_samples_ddim):
x_sample = 255. * rearrange(x_sample.cpu().numpy(), 'c h w -> h w c')
x_sample = x_sample.astype(np.uint8)
if use_GFPGAN and GFPGAN is not None:
torch_gc()
cropped_faces, restored_faces, restored_img = GFPGAN.enhance(x_sample, has_aligned=False, only_center_face=False, paste_back=True)
x_sample = restored_img
image = Image.fromarray(x_sample)
save_image(image, sample_path, f"{base_count:05}", seeds[i], prompts[i], opts.samples_format, info=infotext())
output_images.append(image)
base_count += 1
if (prompt_matrix or opts.grid_save) and not do_not_save_grid:
if prompt_matrix:
grid = image_grid(output_images, batch_size, force_n_rows=1 << ((len(prompt_matrix_parts)-1)//2))
try:
grid = draw_prompt_matrix(grid, width, height, prompt_matrix_parts)
except:
import traceback
print("Error creating prompt_matrix text:", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
output_images.insert(0, grid)
else:
grid = image_grid(output_images, batch_size)
save_image(grid, outpath, f"grid-{grid_count:04}", seed, prompt, opts.grid_format, info=infotext(), short_filename=not opts.grid_extended_filename)
grid_count += 1
torch_gc()
return output_images, seed, infotext()
def txt2img(prompt: str, ddim_steps: int, sampler_index: int, use_GFPGAN: bool, prompt_matrix: bool, ddim_eta: float, n_iter: int, batch_size: int, cfg_scale: float, seed: int, height: int, width: int):
outpath = opts.outdir or "outputs/txt2img-samples"
sampler = samplers[sampler_index].constructor(model)
def init():
pass
def sample(init_data, x, conditioning, unconditional_conditioning):
samples_ddim, _ = sampler.sample(S=ddim_steps, conditioning=conditioning, batch_size=int(x.shape[0]), shape=x[0].shape, verbose=False, unconditional_guidance_scale=cfg_scale, unconditional_conditioning=unconditional_conditioning, eta=ddim_eta, x_T=x)
return samples_ddim
output_images, seed, info = process_images(
outpath=outpath,
func_init=init,
func_sample=sample,
prompt=prompt,
seed=seed,
sampler_index=sampler_index,
batch_size=batch_size,
n_iter=n_iter,
steps=ddim_steps,
cfg_scale=cfg_scale,
width=width,
height=height,
prompt_matrix=prompt_matrix,
use_GFPGAN=use_GFPGAN
)
del sampler
return output_images, seed, plaintext_to_html(info)
class Flagging(gr.FlaggingCallback):
def setup(self, components, flagging_dir: str):
pass
def flag(self, flag_data, flag_option=None, flag_index=None, username=None):
import csv
os.makedirs("log/images", exist_ok=True)
# those must match the "txt2img" function
prompt, ddim_steps, sampler_name, use_GFPGAN, prompt_matrix, ddim_eta, n_iter, n_samples, cfg_scale, request_seed, height, width, images, seed, comment = flag_data
filenames = []
with open("log/log.csv", "a", encoding="utf8", newline='') as file:
import time
import base64
at_start = file.tell() == 0
writer = csv.writer(file)
if at_start:
writer.writerow(["prompt", "seed", "width", "height", "cfgs", "steps", "filename"])
filename_base = str(int(time.time() * 1000))
for i, filedata in enumerate(images):
filename = "log/images/"+filename_base + ("" if len(images) == 1 else "-"+str(i+1)) + ".png"
if filedata.startswith("data:image/png;base64,"):
filedata = filedata[len("data:image/png;base64,"):]
with open(filename, "wb") as imgfile:
imgfile.write(base64.decodebytes(filedata.encode('utf-8')))
filenames.append(filename)
writer.writerow([prompt, seed, width, height, cfg_scale, ddim_steps, filenames[0]])
print("Logged:", filenames[0])
txt2img_interface = gr.Interface(
wrap_gradio_call(txt2img),
inputs=[
gr.Textbox(label="Prompt", placeholder="A corgi wearing a top hat as an oil painting.", lines=1),
gr.Slider(minimum=1, maximum=150, step=1, label="Sampling Steps", value=50),
gr.Radio(label='Sampling method', choices=[x.name for x in samplers], value=samplers[0].name, type="index"),
gr.Checkbox(label='Fix faces using GFPGAN', value=False, visible=GFPGAN is not None),
gr.Checkbox(label='Create prompt matrix (separate multiple prompts using |, and get all combinations of them)', value=False),
gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label="DDIM ETA", value=0.0, visible=False),
gr.Slider(minimum=1, maximum=cmd_opts.max_batch_count, step=1, label='Batch count (how many batches of images to generate)', value=1),
gr.Slider(minimum=1, maximum=8, step=1, label='Batch size (how many images are in a batch; memory-hungry)', value=1),
gr.Slider(minimum=1.0, maximum=15.0, step=0.5, label='Classifier Free Guidance Scale (how strongly the image should follow the prompt)', value=7.0),
gr.Number(label='Seed', value=-1),
gr.Slider(minimum=64, maximum=2048, step=64, label="Height", value=512),
gr.Slider(minimum=64, maximum=2048, step=64, label="Width", value=512),
],
outputs=[
gr.Gallery(label="Images"),
gr.Number(label='Seed'),
gr.HTML(),
],
title="Stable Diffusion Text-to-Image",
flagging_callback=Flagging()
)
def img2img(prompt: str, init_img, ddim_steps: int, sampler_index: int, use_GFPGAN: bool, prompt_matrix, loopback: bool, n_iter: int, batch_size: int, cfg_scale: float, denoising_strength: float, seed: int, height: int, width: int, resize_mode: int):
outpath = opts.outdir or "outputs/img2img-samples"
sampler = samplers_for_img2img[sampler_index].constructor(model)
assert 0. <= denoising_strength <= 1., 'can only work with strength in [0.0, 1.0]'
def init():
image = init_img.convert("RGB")
image = resize_image(resize_mode, image, width, height)
image = np.array(image).astype(np.float32) / 255.0
image = image[None].transpose(0, 3, 1, 2)
image = torch.from_numpy(image)
init_image = 2. * image - 1.
init_image = init_image.to(device)
init_image = repeat(init_image, '1 ... -> b ...', b=batch_size)
init_latent = model.get_first_stage_encoding(model.encode_first_stage(init_image)) # move to latent space
return init_latent,
def sample(init_data, x, conditioning, unconditional_conditioning):
t_enc = int(denoising_strength * ddim_steps)
x0, = init_data
sigmas = sampler.model_wrap.get_sigmas(ddim_steps)
noise = x * sigmas[ddim_steps - t_enc - 1]
xi = x0 + noise
sigma_sched = sigmas[ddim_steps - t_enc - 1:]
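        # noise the init latent up to the sigma where sampling resumes, so only
        # roughly the last t_enc steps of the k-diffusion schedule are actually run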
model_wrap_cfg = CFGDenoiser(sampler.model_wrap)
samples_ddim = sampler.func(model_wrap_cfg, xi, sigma_sched, extra_args={'cond': conditioning, 'uncond': unconditional_conditioning, 'cond_scale': cfg_scale}, disable=False)
return samples_ddim
if loopback:
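        # loopback: feed each iteration's first output back in as the next init image,
        # bumping the seed and gradually lowering the denoising strength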
output_images, info = None, None
history = []
initial_seed = None
for i in range(n_iter):
output_images, seed, info = process_images(
outpath=outpath,
func_init=init,
func_sample=sample,
prompt=prompt,
seed=seed,
sampler_index=0,
batch_size=1,
n_iter=1,
steps=ddim_steps,
cfg_scale=cfg_scale,
width=width,
height=height,
prompt_matrix=prompt_matrix,
use_GFPGAN=use_GFPGAN,
do_not_save_grid=True,
extra_generation_params={"Denoising Strength": denoising_strength},
)
if initial_seed is None:
initial_seed = seed
init_img = output_images[0]
seed = seed + 1
denoising_strength = max(denoising_strength * 0.95, 0.1)
history.append(init_img)
grid_count = len(os.listdir(outpath)) - 1
grid = image_grid(history, batch_size, force_n_rows=1)
save_image(grid, outpath, f"grid-{grid_count:04}", initial_seed, prompt, opts.grid_format, info=info, short_filename=not opts.grid_extended_filename)
output_images = history
seed = initial_seed
else:
output_images, seed, info = process_images(
outpath=outpath,
func_init=init,
func_sample=sample,
prompt=prompt,
seed=seed,
sampler_index=0,
batch_size=batch_size,
n_iter=n_iter,
steps=ddim_steps,
cfg_scale=cfg_scale,
width=width,
height=height,
prompt_matrix=prompt_matrix,
use_GFPGAN=use_GFPGAN,
extra_generation_params={"Denoising Strength": denoising_strength},
)
del sampler
return output_images, seed, plaintext_to_html(info)
sample_img2img = "assets/stable-samples/img2img/sketch-mountains-input.jpg"
sample_img2img = sample_img2img if os.path.exists(sample_img2img) else None
img2img_interface = gr.Interface(
wrap_gradio_call(img2img),
inputs=[
gr.Textbox(placeholder="A fantasy landscape, trending on artstation.", lines=1),
gr.Image(value=sample_img2img, source="upload", interactive=True, type="pil"),
gr.Slider(minimum=1, maximum=150, step=1, label="Sampling Steps", value=50),
gr.Radio(label='Sampling method', choices=[x.name for x in samplers_for_img2img], value=samplers_for_img2img[0].name, type="index"),
gr.Checkbox(label='Fix faces using GFPGAN', value=False, visible=GFPGAN is not None),
gr.Checkbox(label='Create prompt matrix (separate multiple prompts using |, and get all combinations of them)', value=False),
gr.Checkbox(label='Loopback (use images from previous batch when creating next batch)', value=False),
gr.Slider(minimum=1, maximum=cmd_opts.max_batch_count, step=1, label='Batch count (how many batches of images to generate)', value=1),
gr.Slider(minimum=1, maximum=8, step=1, label='Batch size (how many images are in a batch; memory-hungry)', value=1),
gr.Slider(minimum=1.0, maximum=15.0, step=0.5, label='Classifier Free Guidance Scale (how strongly the image should follow the prompt)', value=7.0),
gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising Strength', value=0.75),
gr.Number(label='Seed', value=-1),
gr.Slider(minimum=64, maximum=2048, step=64, label="Height", value=512),
gr.Slider(minimum=64, maximum=2048, step=64, label="Width", value=512),
gr.Radio(label="Resize mode", choices=["Just resize", "Crop and resize", "Resize and fill"], type="index", value="Just resize")
],
outputs=[
gr.Gallery(),
gr.Number(label='Seed'),
gr.HTML(),
],
allow_flagging="never",
)
def run_extras(image, GFPGAN_strength, RealESRGAN_upscaling, RealESRGAN_model_index):
image = image.convert("RGB")
outpath = opts.outdir or "outputs/extras-samples"
if GFPGAN is not None and GFPGAN_strength > 0:
cropped_faces, restored_faces, restored_img = GFPGAN.enhance(np.array(image, dtype=np.uint8), has_aligned=False, only_center_face=False, paste_back=True)
res = Image.fromarray(restored_img)
if GFPGAN_strength < 1.0:
res = Image.blend(image, res, GFPGAN_strength)
image = res
if have_realesrgan and RealESRGAN_upscaling != 1.0:
info = realesrgan_models[RealESRGAN_model_index]
model = info.model()
upsampler = RealESRGANer(
scale=info.netscale,
model_path=info.location,
model=model,
half=True
)
upsampled = upsampler.enhance(np.array(image), outscale=RealESRGAN_upscaling)[0]
image = Image.fromarray(upsampled)
os.makedirs(outpath, exist_ok=True)
base_count = len(os.listdir(outpath))
save_image(image, outpath, f"{base_count:05}", None, '', opts.samples_format, short_filename=True)
return image, 0, ''
extras_interface = gr.Interface(
wrap_gradio_call(run_extras),
inputs=[
gr.Image(label="Source", source="upload", interactive=True, type="pil"),
gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="GFPGAN strength", value=1, interactive=GFPGAN is not None),
gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Real-ESRGAN upscaling", value=2, interactive=have_realesrgan),
gr.Radio(label='Real-ESRGAN model', choices=[x.name for x in realesrgan_models], value=realesrgan_models[0].name, type="index", interactive=have_realesrgan),
],
outputs=[
gr.Image(label="Result"),
gr.Number(label='Seed', visible=False),
gr.HTML(),
],
allow_flagging="never",
)
opts = Options()
if os.path.exists(config_filename):
opts.load(config_filename)
def run_settings(*args):
up = []
for key, value, comp in zip(opts.data_labels.keys(), args, settings_interface.input_components):
opts.data[key] = value
up.append(comp.update(value=value))
opts.save(config_filename)
return 'Settings saved.', ''
def create_setting_component(key):
def fun():
return opts.data[key] if key in opts.data else opts.data_labels[key][0]
labelinfo = opts.data_labels[key]
t = type(labelinfo[0])
label = labelinfo[1]
if t == str:
item = gr.Textbox(label=label, value=fun, lines=1)
elif t == int:
if len(labelinfo) == 4:
item = gr.Slider(minimum=labelinfo[2], maximum=labelinfo[3], step=1, label=label, value=fun)
else:
item = gr.Number(label=label, value=fun)
elif t == bool:
item = gr.Checkbox(label=label, value=fun)
else:
raise Exception(f'bad options item type: {str(t)} for key {key}')
return item
settings_interface = gr.Interface(
run_settings,
inputs=[create_setting_component(key) for key in opts.data_labels.keys()],
outputs=[
gr.Textbox(label='Result'),
gr.HTML(),
],
title=None,
description=None,
allow_flagging="never",
)
interfaces = [
(txt2img_interface, "txt2img"),
(img2img_interface, "img2img"),
(extras_interface, "Extras"),
(settings_interface, "Settings"),
]
config = OmegaConf.load(cmd_opts.config)
model = load_model_from_config(config, cmd_opts.ckpt)
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
model = (model if cmd_opts.no_half else model.half()).to(device)
text_inversion_embeddings = TextInversionEmbeddings()
if os.path.exists(cmd_opts.embeddings_dir):
text_inversion_embeddings.hijack(model)
demo = gr.TabbedInterface(
interface_list=[x[0] for x in interfaces],
tab_names=[x[1] for x in interfaces],
css=("" if cmd_opts.no_progressbar_hiding else css_hide_progressbar) + """
.output-html p {margin: 0 0.5em;}
.performance { font-size: 0.85em; color: #444; }
"""
)
demo.launch()
<|code_end|>
|
FileNotFoundError after new update
Getting a FileNotFoundError: [WinError 3] The system cannot find the path specified: 'C:\\Users\\admin\\stable-diffusion-webui\\scripts' after the new update.
I'm not great with the coding side of things. It was working fine yesterday, but I had downloaded the repo instead of git cloning it, so for the sake of easier updates I started a new installation by git cloning into my user folder. The installation went well, but I ran into this while launching through webui.py.
Python 3.10.6
venv C:\Users\admin\stable-diffusion-webui\venv\Scripts\Python.exe
Launching webui.py...
Loading model from C:\Users\admin\stable-diffusion-webui\model.ckpt
Global Step: 470000
LatentDiffusion: Running in eps-prediction mode
DiffusionWrapper has 859.52 M params.
making attention of type 'vanilla' with 512 in_channels
Working with z of shape (1, 4, 32, 32) = 4096 dimensions.
making attention of type 'vanilla' with 512 in_channels
Traceback (most recent call last):
File "C:\Users\admin\stable-diffusion-webui\webui.py", line 135, in <module>
modules.scripts.load_scripts(os.path.join(script_path, "scripts"))
File "C:\Users\admin\stable-diffusion-webui\modules\scripts.py", line 32, in load_scripts
for filename in os.listdir(basedir):
FileNotFoundError: [WinError 3] The system cannot find the path specified: 'C:\\Users\\admin\\stable-diffusion-webui\\scripts'
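Until a fix lands, a minimal workaround is simply to recreate the folder the traceback is complaining about (path copied from the traceback above; adjust it for your own install). This is a sketch of the workaround, not the project's patch:

```python
import os

# Create the missing "scripts" folder so os.listdir() in load_scripts has something to walk.
os.makedirs(r"C:\Users\admin\stable-diffusion-webui\scripts", exist_ok=True)
```

The more robust change is to have `load_scripts` skip missing directories entirely, which is what the updated `modules/scripts.py` below does.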
|
modules/scripts.py
<|code_start|>import os
import sys
import traceback
import modules.ui as ui
import gradio as gr
from modules.processing import StableDiffusionProcessing
class Script:
filename = None
args_from = None
args_to = None
def title(self):
raise NotImplementedError()
def ui(self, is_img2img):
pass
def run(self, *args):
raise NotImplementedError()
def describe(self):
return ""
scripts = []
def load_scripts(basedir):
for filename in os.listdir(basedir):
path = os.path.join(basedir, filename)
if not os.path.isfile(path):
continue
with open(path, "r", encoding="utf8") as file:
text = file.read()
from types import ModuleType
compiled = compile(text, path, 'exec')
module = ModuleType(filename)
exec(compiled, module.__dict__)
for key, script_class in module.__dict__.items():
if type(script_class) == type and issubclass(script_class, Script):
obj = script_class()
obj.filename = path
scripts.append(obj)
def wrap_call(func, filename, funcname, *args, default=None, **kwargs):
try:
res = func(*args, **kwargs)
return res
except Exception:
print(f"Error calling: {filename}/{funcname}", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
return default
def setup_ui(is_img2img):
titles = [wrap_call(script.title, script.filename, "title") or f"{script.filename} [error]" for script in scripts]
dropdown = gr.Dropdown(label="Script", choices=["None"] + titles, value="None", type="index")
inputs = [dropdown]
for script in scripts:
script.args_from = len(inputs)
controls = script.ui(is_img2img)
for control in controls:
control.visible = False
inputs += controls
script.args_to = len(inputs)
def select_script(index):
if index > 0:
script = scripts[index-1]
args_from = script.args_from
args_to = script.args_to
else:
args_from = 0
args_to = 0
return [ui.gr_show(True if i == 0 else args_from <= i < args_to) for i in range(len(inputs))]
dropdown.change(
fn=select_script,
inputs=[dropdown],
outputs=inputs
)
return inputs
def run(p: StableDiffusionProcessing, *args):
script_index = args[0] - 1
if script_index < 0 or script_index >= len(scripts):
return None
script = scripts[script_index]
script_args = args[script.args_from:script.args_to]
processed = script.run(p, *script_args)
return processed
<|code_end|>
|
modules/scripts.py
<|code_start|>import os
import sys
import traceback
import modules.ui as ui
import gradio as gr
from modules.processing import StableDiffusionProcessing
class Script:
filename = None
args_from = None
args_to = None
def title(self):
raise NotImplementedError()
def ui(self, is_img2img):
pass
def run(self, *args):
raise NotImplementedError()
def describe(self):
return ""
scripts = []
def load_scripts(basedir):
if not os.path.exists(basedir):
return
for filename in os.listdir(basedir):
path = os.path.join(basedir, filename)
if not os.path.isfile(path):
continue
with open(path, "r", encoding="utf8") as file:
text = file.read()
from types import ModuleType
compiled = compile(text, path, 'exec')
module = ModuleType(filename)
exec(compiled, module.__dict__)
for key, script_class in module.__dict__.items():
if type(script_class) == type and issubclass(script_class, Script):
obj = script_class()
obj.filename = path
scripts.append(obj)
def wrap_call(func, filename, funcname, *args, default=None, **kwargs):
try:
res = func(*args, **kwargs)
return res
except Exception:
print(f"Error calling: {filename}/{funcname}", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
return default
def setup_ui(is_img2img):
titles = [wrap_call(script.title, script.filename, "title") or f"{script.filename} [error]" for script in scripts]
dropdown = gr.Dropdown(label="Script", choices=["None"] + titles, value="None", type="index")
inputs = [dropdown]
for script in scripts:
script.args_from = len(inputs)
controls = script.ui(is_img2img)
for control in controls:
control.visible = False
inputs += controls
script.args_to = len(inputs)
def select_script(index):
if index > 0:
script = scripts[index-1]
args_from = script.args_from
args_to = script.args_to
else:
args_from = 0
args_to = 0
return [ui.gr_show(True if i == 0 else args_from <= i < args_to) for i in range(len(inputs))]
dropdown.change(
fn=select_script,
inputs=[dropdown],
outputs=inputs
)
return inputs
def run(p: StableDiffusionProcessing, *args):
script_index = args[0] - 1
if script_index < 0 or script_index >= len(scripts):
return None
script = scripts[script_index]
script_args = args[script.args_from:script.args_to]
processed = script.run(p, *script_args)
return processed
<|code_end|>
|
Error in extras tab when resizing images
**Describe the bug**
Get error "AttributeError: 'int' object has no attribute 'encode'" on trying to upscale an image in the "extras" tab.
**To Reproduce**
1. Go to 'extras'
2. Click on Lanczos, upload an image, and press generate
3. See error on the right
**Expected behavior**
No error
**Screenshots**

**Desktop (please complete the following information):**
- OS: MacOS
- Browser: Safari
- Commit revision: 19a817d97dad1b2ce58e70cadc35ccba5cf1130e
**Additional context**
Serving from a linux box.
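The error itself is easy to reproduce in isolation: `PngImagePlugin.PngInfo.add_text` calls `.encode()` on the value it is given, so any non-string metadata value (an `int`, for example) triggers exactly this AttributeError. A minimal sketch with a made-up key/value pair:

```python
from PIL import PngImagePlugin

pnginfo = PngImagePlugin.PngInfo()
# pnginfo.add_text("dpi", 96)      # AttributeError: 'int' object has no attribute 'encode'
pnginfo.add_text("dpi", str(96))   # coercing the value to str avoids the crash
```

Coercing values to `str` before calling `add_text` is what the patched `save_image` below does.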
|
modules/images.py
<|code_start|>import datetime
import math
import os
from collections import namedtuple
import re
import numpy as np
from PIL import Image, ImageFont, ImageDraw, PngImagePlugin
from fonts.ttf import Roboto
import string
import modules.shared
from modules import sd_samplers, shared
from modules.shared import opts
LANCZOS = (Image.Resampling.LANCZOS if hasattr(Image, 'Resampling') else Image.LANCZOS)
def image_grid(imgs, batch_size=1, rows=None):
if rows is None:
if opts.n_rows > 0:
rows = opts.n_rows
elif opts.n_rows == 0:
rows = batch_size
else:
rows = math.sqrt(len(imgs))
rows = round(rows)
cols = math.ceil(len(imgs) / rows)
w, h = imgs[0].size
grid = Image.new('RGB', size=(cols * w, rows * h), color='black')
for i, img in enumerate(imgs):
grid.paste(img, box=(i % cols * w, i // cols * h))
return grid
Grid = namedtuple("Grid", ["tiles", "tile_w", "tile_h", "image_w", "image_h", "overlap"])
def split_grid(image, tile_w=512, tile_h=512, overlap=64):
w = image.width
h = image.height
non_overlap_width = tile_w - overlap
non_overlap_height = tile_h - overlap
cols = math.ceil((w - overlap) / non_overlap_width)
rows = math.ceil((h - overlap) / non_overlap_height)
dx = (w - tile_w) / (cols-1) if cols > 1 else 0
dy = (h - tile_h) / (rows-1) if rows > 1 else 0
grid = Grid([], tile_w, tile_h, w, h, overlap)
for row in range(rows):
row_images = []
y = int(row * dy)
if y + tile_h >= h:
y = h - tile_h
for col in range(cols):
x = int(col * dx)
if x+tile_w >= w:
x = w - tile_w
tile = image.crop((x, y, x + tile_w, y + tile_h))
row_images.append([x, tile_w, tile])
grid.tiles.append([y, tile_h, row_images])
return grid
def combine_grid(grid):
def make_mask_image(r):
r = r * 255 / grid.overlap
r = r.astype(np.uint8)
return Image.fromarray(r, 'L')
mask_w = make_mask_image(np.arange(grid.overlap, dtype=np.float32).reshape((1, grid.overlap)).repeat(grid.tile_h, axis=0))
mask_h = make_mask_image(np.arange(grid.overlap, dtype=np.float32).reshape((grid.overlap, 1)).repeat(grid.image_w, axis=1))
combined_image = Image.new("RGB", (grid.image_w, grid.image_h))
for y, h, row in grid.tiles:
combined_row = Image.new("RGB", (grid.image_w, h))
for x, w, tile in row:
if x == 0:
combined_row.paste(tile, (0, 0))
continue
combined_row.paste(tile.crop((0, 0, grid.overlap, h)), (x, 0), mask=mask_w)
combined_row.paste(tile.crop((grid.overlap, 0, w, h)), (x + grid.overlap, 0))
if y == 0:
combined_image.paste(combined_row, (0, 0))
continue
combined_image.paste(combined_row.crop((0, 0, combined_row.width, grid.overlap)), (0, y), mask=mask_h)
combined_image.paste(combined_row.crop((0, grid.overlap, combined_row.width, h)), (0, y + grid.overlap))
return combined_image
class GridAnnotation:
def __init__(self, text='', is_active=True):
self.text = text
self.is_active = is_active
self.size = None
def draw_grid_annotations(im, width, height, hor_texts, ver_texts):
def wrap(drawing, text, font, line_length):
lines = ['']
for word in text.split():
line = f'{lines[-1]} {word}'.strip()
if drawing.textlength(line, font=font) <= line_length:
lines[-1] = line
else:
lines.append(word)
return lines
def draw_texts(drawing, draw_x, draw_y, lines):
for i, line in enumerate(lines):
drawing.multiline_text((draw_x, draw_y + line.size[1] / 2), line.text, font=fnt, fill=color_active if line.is_active else color_inactive, anchor="mm", align="center")
if not line.is_active:
drawing.line((draw_x - line.size[0]//2, draw_y + line.size[1]//2, draw_x + line.size[0]//2, draw_y + line.size[1]//2), fill=color_inactive, width=4)
draw_y += line.size[1] + line_spacing
fontsize = (width + height) // 25
line_spacing = fontsize // 2
try:
fnt = ImageFont.truetype(opts.font or Roboto, fontsize)
except Exception:
fnt = ImageFont.truetype(Roboto, fontsize)
color_active = (0, 0, 0)
color_inactive = (153, 153, 153)
pad_left = 0 if sum([sum([len(line.text) for line in lines]) for lines in ver_texts]) == 0 else width * 3 // 4
cols = im.width // width
rows = im.height // height
assert cols == len(hor_texts), f'bad number of horizontal texts: {len(hor_texts)}; must be {cols}'
assert rows == len(ver_texts), f'bad number of vertical texts: {len(ver_texts)}; must be {rows}'
calc_img = Image.new("RGB", (1, 1), "white")
calc_d = ImageDraw.Draw(calc_img)
for texts, allowed_width in zip(hor_texts + ver_texts, [width] * len(hor_texts) + [pad_left] * len(ver_texts)):
items = [] + texts
texts.clear()
for line in items:
wrapped = wrap(calc_d, line.text, fnt, allowed_width)
texts += [GridAnnotation(x, line.is_active) for x in wrapped]
for line in texts:
bbox = calc_d.multiline_textbbox((0, 0), line.text, font=fnt)
line.size = (bbox[2] - bbox[0], bbox[3] - bbox[1])
hor_text_heights = [sum([line.size[1] + line_spacing for line in lines]) - line_spacing for lines in hor_texts]
ver_text_heights = [sum([line.size[1] + line_spacing for line in lines]) - line_spacing * len(lines) for lines in ver_texts]
pad_top = max(hor_text_heights) + line_spacing * 2
result = Image.new("RGB", (im.width + pad_left, im.height + pad_top), "white")
result.paste(im, (pad_left, pad_top))
d = ImageDraw.Draw(result)
for col in range(cols):
x = pad_left + width * col + width / 2
y = pad_top / 2 - hor_text_heights[col] / 2
draw_texts(d, x, y, hor_texts[col])
for row in range(rows):
x = pad_left / 2
y = pad_top + height * row + height / 2 - ver_text_heights[row] / 2
draw_texts(d, x, y, ver_texts[row])
return result
def draw_prompt_matrix(im, width, height, all_prompts):
prompts = all_prompts[1:]
boundary = math.ceil(len(prompts) / 2)
prompts_horiz = prompts[:boundary]
prompts_vert = prompts[boundary:]
hor_texts = [[GridAnnotation(x, is_active=pos & (1 << i) != 0) for i, x in enumerate(prompts_horiz)] for pos in range(1 << len(prompts_horiz))]
ver_texts = [[GridAnnotation(x, is_active=pos & (1 << i) != 0) for i, x in enumerate(prompts_vert)] for pos in range(1 << len(prompts_vert))]
return draw_grid_annotations(im, width, height, hor_texts, ver_texts)
def resize_image(resize_mode, im, width, height):
if resize_mode == 0:
res = im.resize((width, height), resample=LANCZOS)
elif resize_mode == 1:
ratio = width / height
src_ratio = im.width / im.height
src_w = width if ratio > src_ratio else im.width * height // im.height
src_h = height if ratio <= src_ratio else im.height * width // im.width
resized = im.resize((src_w, src_h), resample=LANCZOS)
res = Image.new("RGB", (width, height))
res.paste(resized, box=(width // 2 - src_w // 2, height // 2 - src_h // 2))
else:
ratio = width / height
src_ratio = im.width / im.height
src_w = width if ratio < src_ratio else im.width * height // im.height
src_h = height if ratio >= src_ratio else im.height * width // im.width
resized = im.resize((src_w, src_h), resample=LANCZOS)
res = Image.new("RGB", (width, height))
res.paste(resized, box=(width // 2 - src_w // 2, height // 2 - src_h // 2))
if ratio < src_ratio:
fill_height = height // 2 - src_h // 2
res.paste(resized.resize((width, fill_height), box=(0, 0, width, 0)), box=(0, 0))
res.paste(resized.resize((width, fill_height), box=(0, resized.height, width, resized.height)), box=(0, fill_height + src_h))
elif ratio > src_ratio:
fill_width = width // 2 - src_w // 2
res.paste(resized.resize((fill_width, height), box=(0, 0, 0, height)), box=(0, 0))
res.paste(resized.resize((fill_width, height), box=(resized.width, 0, resized.width, height)), box=(fill_width + src_w, 0))
return res
invalid_filename_chars = '<>:"/\\|?*\n'
re_nonletters = re.compile(r'[\s'+string.punctuation+']+')
def sanitize_filename_part(text, replace_spaces=True):
if replace_spaces:
text = text.replace(' ', '_')
return text.translate({ord(x): '' for x in invalid_filename_chars})[:128]
def apply_filename_pattern(x, p, seed, prompt):
if seed is not None:
x = x.replace("[seed]", str(seed))
if prompt is not None:
x = x.replace("[prompt]", sanitize_filename_part(prompt)[:128])
x = x.replace("[prompt_spaces]", sanitize_filename_part(prompt, replace_spaces=False)[:128])
if "[prompt_words]" in x:
words = [x for x in re_nonletters.split(prompt or "") if len(x) > 0]
if len(words) == 0:
words = ["empty"]
x = x.replace("[prompt_words]", " ".join(words[0:8]).strip())
if p is not None:
x = x.replace("[steps]", str(p.steps))
x = x.replace("[cfg]", str(p.cfg_scale))
x = x.replace("[width]", str(p.width))
x = x.replace("[height]", str(p.height))
x = x.replace("[sampler]", sd_samplers.samplers[p.sampler_index].name)
x = x.replace("[model_hash]", shared.sd_model_hash)
x = x.replace("[date]", datetime.date.today().isoformat())
return x
def save_image(image, path, basename, seed=None, prompt=None, extension='png', info=None, short_filename=False, no_prompt=False, pnginfo_section_name='parameters', p=None, existing_info=None):
# would be better to add this as an argument in future, but will do for now
is_a_grid = basename != ""
if short_filename or prompt is None or seed is None:
file_decoration = ""
elif opts.save_to_dirs:
file_decoration = opts.samples_filename_pattern or "[seed]"
else:
file_decoration = opts.samples_filename_pattern or "[seed]-[prompt_spaces]"
if file_decoration != "":
file_decoration = "-" + file_decoration.lower()
file_decoration = apply_filename_pattern(file_decoration, p, seed, prompt)
if extension == 'png' and opts.enable_pnginfo and info is not None:
pnginfo = PngImagePlugin.PngInfo()
if existing_info is not None:
for k, v in existing_info.items():
pnginfo.add_text(k, v)
pnginfo.add_text(pnginfo_section_name, info)
else:
pnginfo = None
save_to_dirs = (is_a_grid and opts.grid_save_to_dirs) or (not is_a_grid and opts.save_to_dirs)
if save_to_dirs:
dirname = apply_filename_pattern(opts.directories_filename_pattern or "[prompt_words]", p, seed, prompt)
path = os.path.join(path, dirname)
os.makedirs(path, exist_ok=True)
filecount = len([x for x in os.listdir(path) if os.path.splitext(x)[1] == '.' + extension])
fullfn = "a.png"
fullfn_without_extension = "a"
for i in range(500):
fn = f"{filecount+i:05}" if basename == '' else f"{basename}-{filecount+i:04}"
fullfn = os.path.join(path, f"{fn}{file_decoration}.{extension}")
fullfn_without_extension = os.path.join(path, f"{fn}{file_decoration}")
if not os.path.exists(fullfn):
break
image.save(fullfn, quality=opts.jpeg_quality, pnginfo=pnginfo)
target_side_length = 4000
oversize = image.width > target_side_length or image.height > target_side_length
if opts.export_for_4chan and (oversize or os.stat(fullfn).st_size > 4 * 1024 * 1024):
ratio = image.width / image.height
if oversize and ratio > 1:
image = image.resize((target_side_length, image.height * target_side_length // image.width), LANCZOS)
elif oversize:
image = image.resize((image.width * target_side_length // image.height, target_side_length), LANCZOS)
image.save(f"{fullfn_without_extension}.jpg", quality=opts.jpeg_quality, pnginfo=pnginfo)
if opts.save_txt and info is not None:
with open(f"{fullfn_without_extension}.txt", "w", encoding="utf8") as file:
file.write(info + "\n")
class Upscaler:
name = "Lanczos"
def do_upscale(self, img):
return img
def upscale(self, img, w, h):
for i in range(3):
if img.width >= w and img.height >= h:
break
img = self.do_upscale(img)
if img.width != w or img.height != h:
img = img.resize((int(w), int(h)), resample=LANCZOS)
return img
class UpscalerNone(Upscaler):
name = "None"
def upscale(self, img, w, h):
return img
modules.shared.sd_upscalers.append(UpscalerNone())
modules.shared.sd_upscalers.append(Upscaler())
<|code_end|>
|
modules/images.py
<|code_start|>import datetime
import math
import os
from collections import namedtuple
import re
import numpy as np
from PIL import Image, ImageFont, ImageDraw, PngImagePlugin
from fonts.ttf import Roboto
import string
import modules.shared
from modules import sd_samplers, shared
from modules.shared import opts
LANCZOS = (Image.Resampling.LANCZOS if hasattr(Image, 'Resampling') else Image.LANCZOS)
def image_grid(imgs, batch_size=1, rows=None):
if rows is None:
if opts.n_rows > 0:
rows = opts.n_rows
elif opts.n_rows == 0:
rows = batch_size
else:
rows = math.sqrt(len(imgs))
rows = round(rows)
cols = math.ceil(len(imgs) / rows)
w, h = imgs[0].size
grid = Image.new('RGB', size=(cols * w, rows * h), color='black')
for i, img in enumerate(imgs):
grid.paste(img, box=(i % cols * w, i // cols * h))
return grid
Grid = namedtuple("Grid", ["tiles", "tile_w", "tile_h", "image_w", "image_h", "overlap"])
def split_grid(image, tile_w=512, tile_h=512, overlap=64):
w = image.width
h = image.height
non_overlap_width = tile_w - overlap
non_overlap_height = tile_h - overlap
cols = math.ceil((w - overlap) / non_overlap_width)
rows = math.ceil((h - overlap) / non_overlap_height)
dx = (w - tile_w) / (cols-1) if cols > 1 else 0
dy = (h - tile_h) / (rows-1) if rows > 1 else 0
grid = Grid([], tile_w, tile_h, w, h, overlap)
for row in range(rows):
row_images = []
y = int(row * dy)
if y + tile_h >= h:
y = h - tile_h
for col in range(cols):
x = int(col * dx)
if x+tile_w >= w:
x = w - tile_w
tile = image.crop((x, y, x + tile_w, y + tile_h))
row_images.append([x, tile_w, tile])
grid.tiles.append([y, tile_h, row_images])
return grid
def combine_grid(grid):
def make_mask_image(r):
r = r * 255 / grid.overlap
r = r.astype(np.uint8)
return Image.fromarray(r, 'L')
mask_w = make_mask_image(np.arange(grid.overlap, dtype=np.float32).reshape((1, grid.overlap)).repeat(grid.tile_h, axis=0))
mask_h = make_mask_image(np.arange(grid.overlap, dtype=np.float32).reshape((grid.overlap, 1)).repeat(grid.image_w, axis=1))
combined_image = Image.new("RGB", (grid.image_w, grid.image_h))
for y, h, row in grid.tiles:
combined_row = Image.new("RGB", (grid.image_w, h))
for x, w, tile in row:
if x == 0:
combined_row.paste(tile, (0, 0))
continue
combined_row.paste(tile.crop((0, 0, grid.overlap, h)), (x, 0), mask=mask_w)
combined_row.paste(tile.crop((grid.overlap, 0, w, h)), (x + grid.overlap, 0))
if y == 0:
combined_image.paste(combined_row, (0, 0))
continue
combined_image.paste(combined_row.crop((0, 0, combined_row.width, grid.overlap)), (0, y), mask=mask_h)
combined_image.paste(combined_row.crop((0, grid.overlap, combined_row.width, h)), (0, y + grid.overlap))
return combined_image
class GridAnnotation:
def __init__(self, text='', is_active=True):
self.text = text
self.is_active = is_active
self.size = None
def draw_grid_annotations(im, width, height, hor_texts, ver_texts):
def wrap(drawing, text, font, line_length):
lines = ['']
for word in text.split():
line = f'{lines[-1]} {word}'.strip()
if drawing.textlength(line, font=font) <= line_length:
lines[-1] = line
else:
lines.append(word)
return lines
def draw_texts(drawing, draw_x, draw_y, lines):
for i, line in enumerate(lines):
drawing.multiline_text((draw_x, draw_y + line.size[1] / 2), line.text, font=fnt, fill=color_active if line.is_active else color_inactive, anchor="mm", align="center")
if not line.is_active:
drawing.line((draw_x - line.size[0]//2, draw_y + line.size[1]//2, draw_x + line.size[0]//2, draw_y + line.size[1]//2), fill=color_inactive, width=4)
draw_y += line.size[1] + line_spacing
fontsize = (width + height) // 25
line_spacing = fontsize // 2
try:
fnt = ImageFont.truetype(opts.font or Roboto, fontsize)
except Exception:
fnt = ImageFont.truetype(Roboto, fontsize)
color_active = (0, 0, 0)
color_inactive = (153, 153, 153)
pad_left = 0 if sum([sum([len(line.text) for line in lines]) for lines in ver_texts]) == 0 else width * 3 // 4
cols = im.width // width
rows = im.height // height
assert cols == len(hor_texts), f'bad number of horizontal texts: {len(hor_texts)}; must be {cols}'
assert rows == len(ver_texts), f'bad number of vertical texts: {len(ver_texts)}; must be {rows}'
calc_img = Image.new("RGB", (1, 1), "white")
calc_d = ImageDraw.Draw(calc_img)
for texts, allowed_width in zip(hor_texts + ver_texts, [width] * len(hor_texts) + [pad_left] * len(ver_texts)):
items = [] + texts
texts.clear()
for line in items:
wrapped = wrap(calc_d, line.text, fnt, allowed_width)
texts += [GridAnnotation(x, line.is_active) for x in wrapped]
for line in texts:
bbox = calc_d.multiline_textbbox((0, 0), line.text, font=fnt)
line.size = (bbox[2] - bbox[0], bbox[3] - bbox[1])
hor_text_heights = [sum([line.size[1] + line_spacing for line in lines]) - line_spacing for lines in hor_texts]
ver_text_heights = [sum([line.size[1] + line_spacing for line in lines]) - line_spacing * len(lines) for lines in ver_texts]
pad_top = max(hor_text_heights) + line_spacing * 2
result = Image.new("RGB", (im.width + pad_left, im.height + pad_top), "white")
result.paste(im, (pad_left, pad_top))
d = ImageDraw.Draw(result)
for col in range(cols):
x = pad_left + width * col + width / 2
y = pad_top / 2 - hor_text_heights[col] / 2
draw_texts(d, x, y, hor_texts[col])
for row in range(rows):
x = pad_left / 2
y = pad_top + height * row + height / 2 - ver_text_heights[row] / 2
draw_texts(d, x, y, ver_texts[row])
return result
def draw_prompt_matrix(im, width, height, all_prompts):
prompts = all_prompts[1:]
boundary = math.ceil(len(prompts) / 2)
prompts_horiz = prompts[:boundary]
prompts_vert = prompts[boundary:]
hor_texts = [[GridAnnotation(x, is_active=pos & (1 << i) != 0) for i, x in enumerate(prompts_horiz)] for pos in range(1 << len(prompts_horiz))]
ver_texts = [[GridAnnotation(x, is_active=pos & (1 << i) != 0) for i, x in enumerate(prompts_vert)] for pos in range(1 << len(prompts_vert))]
return draw_grid_annotations(im, width, height, hor_texts, ver_texts)
def resize_image(resize_mode, im, width, height):
if resize_mode == 0:
res = im.resize((width, height), resample=LANCZOS)
elif resize_mode == 1:
ratio = width / height
src_ratio = im.width / im.height
src_w = width if ratio > src_ratio else im.width * height // im.height
src_h = height if ratio <= src_ratio else im.height * width // im.width
resized = im.resize((src_w, src_h), resample=LANCZOS)
res = Image.new("RGB", (width, height))
res.paste(resized, box=(width // 2 - src_w // 2, height // 2 - src_h // 2))
else:
ratio = width / height
src_ratio = im.width / im.height
src_w = width if ratio < src_ratio else im.width * height // im.height
src_h = height if ratio >= src_ratio else im.height * width // im.width
resized = im.resize((src_w, src_h), resample=LANCZOS)
res = Image.new("RGB", (width, height))
res.paste(resized, box=(width // 2 - src_w // 2, height // 2 - src_h // 2))
if ratio < src_ratio:
fill_height = height // 2 - src_h // 2
res.paste(resized.resize((width, fill_height), box=(0, 0, width, 0)), box=(0, 0))
res.paste(resized.resize((width, fill_height), box=(0, resized.height, width, resized.height)), box=(0, fill_height + src_h))
elif ratio > src_ratio:
fill_width = width // 2 - src_w // 2
res.paste(resized.resize((fill_width, height), box=(0, 0, 0, height)), box=(0, 0))
res.paste(resized.resize((fill_width, height), box=(resized.width, 0, resized.width, height)), box=(fill_width + src_w, 0))
return res
invalid_filename_chars = '<>:"/\\|?*\n'
re_nonletters = re.compile(r'[\s'+string.punctuation+']+')
def sanitize_filename_part(text, replace_spaces=True):
if replace_spaces:
text = text.replace(' ', '_')
return text.translate({ord(x): '' for x in invalid_filename_chars})[:128]
def apply_filename_pattern(x, p, seed, prompt):
if seed is not None:
x = x.replace("[seed]", str(seed))
if prompt is not None:
x = x.replace("[prompt]", sanitize_filename_part(prompt)[:128])
x = x.replace("[prompt_spaces]", sanitize_filename_part(prompt, replace_spaces=False)[:128])
if "[prompt_words]" in x:
words = [x for x in re_nonletters.split(prompt or "") if len(x) > 0]
if len(words) == 0:
words = ["empty"]
x = x.replace("[prompt_words]", " ".join(words[0:8]).strip())
if p is not None:
x = x.replace("[steps]", str(p.steps))
x = x.replace("[cfg]", str(p.cfg_scale))
x = x.replace("[width]", str(p.width))
x = x.replace("[height]", str(p.height))
x = x.replace("[sampler]", sd_samplers.samplers[p.sampler_index].name)
x = x.replace("[model_hash]", shared.sd_model_hash)
x = x.replace("[date]", datetime.date.today().isoformat())
return x
def save_image(image, path, basename, seed=None, prompt=None, extension='png', info=None, short_filename=False, no_prompt=False, pnginfo_section_name='parameters', p=None, existing_info=None):
# would be better to add this as an argument in future, but will do for now
is_a_grid = basename != ""
if short_filename or prompt is None or seed is None:
file_decoration = ""
elif opts.save_to_dirs:
file_decoration = opts.samples_filename_pattern or "[seed]"
else:
file_decoration = opts.samples_filename_pattern or "[seed]-[prompt_spaces]"
if file_decoration != "":
file_decoration = "-" + file_decoration.lower()
file_decoration = apply_filename_pattern(file_decoration, p, seed, prompt)
if extension == 'png' and opts.enable_pnginfo and info is not None:
pnginfo = PngImagePlugin.PngInfo()
if existing_info is not None:
for k, v in existing_info.items():
pnginfo.add_text(k, str(v))
pnginfo.add_text(pnginfo_section_name, info)
else:
pnginfo = None
save_to_dirs = (is_a_grid and opts.grid_save_to_dirs) or (not is_a_grid and opts.save_to_dirs)
if save_to_dirs:
dirname = apply_filename_pattern(opts.directories_filename_pattern or "[prompt_words]", p, seed, prompt)
path = os.path.join(path, dirname)
os.makedirs(path, exist_ok=True)
filecount = len([x for x in os.listdir(path) if os.path.splitext(x)[1] == '.' + extension])
fullfn = "a.png"
fullfn_without_extension = "a"
for i in range(500):
fn = f"{filecount+i:05}" if basename == '' else f"{basename}-{filecount+i:04}"
fullfn = os.path.join(path, f"{fn}{file_decoration}.{extension}")
fullfn_without_extension = os.path.join(path, f"{fn}{file_decoration}")
if not os.path.exists(fullfn):
break
image.save(fullfn, quality=opts.jpeg_quality, pnginfo=pnginfo)
target_side_length = 4000
oversize = image.width > target_side_length or image.height > target_side_length
if opts.export_for_4chan and (oversize or os.stat(fullfn).st_size > 4 * 1024 * 1024):
ratio = image.width / image.height
if oversize and ratio > 1:
image = image.resize((target_side_length, image.height * target_side_length // image.width), LANCZOS)
elif oversize:
image = image.resize((image.width * target_side_length // image.height, target_side_length), LANCZOS)
image.save(f"{fullfn_without_extension}.jpg", quality=opts.jpeg_quality, pnginfo=pnginfo)
if opts.save_txt and info is not None:
with open(f"{fullfn_without_extension}.txt", "w", encoding="utf8") as file:
file.write(info + "\n")
class Upscaler:
name = "Lanczos"
def do_upscale(self, img):
return img
def upscale(self, img, w, h):
for i in range(3):
if img.width >= w and img.height >= h:
break
img = self.do_upscale(img)
if img.width != w or img.height != h:
img = img.resize((int(w), int(h)), resample=LANCZOS)
return img
class UpscalerNone(Upscaler):
name = "None"
def upscale(self, img, w, h):
return img
modules.shared.sd_upscalers.append(UpscalerNone())
modules.shared.sd_upscalers.append(Upscaler())
<|code_end|>
|
New VRAM Monitoring throws errors on an AMD powered install
As the title says, after updating to the latest branch, Stable Diffusion stopped working. After some debugging and rudimentary coding around, removing any references to memmon.py lets the webui work as usual again.
Perhaps an option to outright disable the memory monitoring?
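A minimal sketch of what such a guard could look like; the probe call and the "poll rate 0 turns it off" convention are assumptions for illustration, not the project's actual implementation:

```python
import threading
import torch

class SafeMemUsageMonitor(threading.Thread):
    """Sketch: skip VRAM monitoring on backends without the CUDA memory APIs."""

    def __init__(self, name, device, opts):
        super().__init__(name=name, daemon=True)
        self.device = device
        self.opts = opts
        self.run_flag = threading.Event()
        # Assumed convention: a poll rate of 0 (or less) means "monitoring off".
        self.disabled = opts.memmon_poll_rate <= 0
        if not self.disabled:
            try:
                torch.cuda.mem_get_info()  # probe once; raises on unsupported hardware/drivers
            except Exception:
                self.disabled = True

    def run(self):
        if self.disabled:
            return  # never touch torch.cuda again on this install
        # ... the existing polling loop would go here ...
```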
|
modules/memmon.py
<|code_start|>import threading
import time
from collections import defaultdict
import torch
class MemUsageMonitor(threading.Thread):
run_flag = None
device = None
disabled = False
opts = None
data = None
def __init__(self, name, device, opts):
threading.Thread.__init__(self)
self.name = name
self.device = device
self.opts = opts
self.daemon = True
self.run_flag = threading.Event()
self.data = defaultdict(int)
def run(self):
if self.disabled:
return
while True:
self.run_flag.wait()
torch.cuda.reset_peak_memory_stats()
self.data.clear()
if self.opts.memmon_poll_rate <= 0:
self.run_flag.clear()
continue
self.data["min_free"] = torch.cuda.mem_get_info()[0]
while self.run_flag.is_set():
free, total = torch.cuda.mem_get_info() # calling with self.device errors, torch bug?
self.data["min_free"] = min(self.data["min_free"], free)
time.sleep(1 / self.opts.memmon_poll_rate)
def dump_debug(self):
print(self, 'recorded data:')
for k, v in self.read().items():
print(k, -(v // -(1024 ** 2)))
print(self, 'raw torch memory stats:')
tm = torch.cuda.memory_stats(self.device)
for k, v in tm.items():
if 'bytes' not in k:
continue
print('\t' if 'peak' in k else '', k, -(v // -(1024 ** 2)))
print(torch.cuda.memory_summary())
def monitor(self):
self.run_flag.set()
def read(self):
free, total = torch.cuda.mem_get_info()
self.data["total"] = total
torch_stats = torch.cuda.memory_stats(self.device)
self.data["active_peak"] = torch_stats["active_bytes.all.peak"]
self.data["reserved_peak"] = torch_stats["reserved_bytes.all.peak"]
self.data["system_peak"] = total - self.data["min_free"]
return self.data
def stop(self):
self.run_flag.clear()
return self.read()
<|code_end|>
modules/ui.py
<|code_start|>import base64
import html
import io
import json
import math
import mimetypes
import os
import random
import sys
import time
import traceback
import numpy as np
import torch
from PIL import Image
import gradio as gr
import gradio.utils
import gradio.routes
from modules.paths import script_path
from modules.shared import opts, cmd_opts
import modules.shared as shared
from modules.sd_samplers import samplers, samplers_for_img2img
import modules.realesrgan_model as realesrgan
import modules.scripts
import modules.gfpgan_model
import modules.codeformer_model
import modules.styles
# this is a fix for Windows users. Without it, javascript files will be served with text/html content-type and the browser will not show any UI
mimetypes.init()
mimetypes.add_type('application/javascript', '.js')
if not cmd_opts.share and not cmd_opts.listen:
# fix gradio phoning home
gradio.utils.version_check = lambda: None
gradio.utils.get_local_ip_address = lambda: '127.0.0.1'
def gr_show(visible=True):
return {"visible": visible, "__type__": "update"}
sample_img2img = "assets/stable-samples/img2img/sketch-mountains-input.jpg"
sample_img2img = sample_img2img if os.path.exists(sample_img2img) else None
css_hide_progressbar = """
.wrap .m-12 svg { display:none!important; }
.wrap .m-12::before { content:"Loading..." }
.progress-bar { display:none!important; }
.meta-text { display:none!important; }
"""
def plaintext_to_html(text):
text = "<p>" + "<br>\n".join([f"{html.escape(x)}" for x in text.split('\n')]) + "</p>"
return text
def image_from_url_text(filedata):
if type(filedata) == list:
if len(filedata) == 0:
return None
filedata = filedata[0]
if filedata.startswith("data:image/png;base64,"):
filedata = filedata[len("data:image/png;base64,"):]
filedata = base64.decodebytes(filedata.encode('utf-8'))
image = Image.open(io.BytesIO(filedata))
return image
def send_gradio_gallery_to_image(x):
if len(x) == 0:
return None
return image_from_url_text(x[0])
def save_files(js_data, images, index):
import csv
os.makedirs(opts.outdir_save, exist_ok=True)
filenames = []
data = json.loads(js_data)
if index > -1 and opts.save_selected_only and (index > 0 or not opts.return_grid): # ensures we are looking at a specific non-grid picture, and we have save_selected_only
images = [images[index]]
data["seed"] += (index - 1 if opts.return_grid else index)
with open(os.path.join(opts.outdir_save, "log.csv"), "a", encoding="utf8", newline='') as file:
at_start = file.tell() == 0
writer = csv.writer(file)
if at_start:
writer.writerow(["prompt", "seed", "width", "height", "sampler", "cfgs", "steps", "filename", "negative_prompt"])
filename_base = str(int(time.time() * 1000))
for i, filedata in enumerate(images):
filename = filename_base + ("" if len(images) == 1 else "-" + str(i + 1)) + ".png"
filepath = os.path.join(opts.outdir_save, filename)
if filedata.startswith("data:image/png;base64,"):
filedata = filedata[len("data:image/png;base64,"):]
with open(filepath, "wb") as imgfile:
imgfile.write(base64.decodebytes(filedata.encode('utf-8')))
filenames.append(filename)
writer.writerow([data["prompt"], data["seed"], data["width"], data["height"], data["sampler"], data["cfg_scale"], data["steps"], filenames[0], data["negative_prompt"]])
return '', '', plaintext_to_html(f"Saved: {filenames[0]}")
def wrap_gradio_call(func):
def f(*args, **kwargs):
shared.mem_mon.monitor()
t = time.perf_counter()
try:
res = list(func(*args, **kwargs))
except Exception as e:
print("Error completing request", file=sys.stderr)
print("Arguments:", args, kwargs, file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
shared.state.job = ""
shared.state.job_count = 0
res = [None, '', f"<div class='error'>{plaintext_to_html(type(e).__name__+': '+str(e))}</div>"]
elapsed = time.perf_counter() - t
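        # -(v // -(1024 * 1024)) is ceiling division: report peak byte counts as whole MiB, rounded up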
mem_stats = {k: -(v//-(1024*1024)) for k,v in shared.mem_mon.stop().items()}
active_peak = mem_stats['active_peak']
reserved_peak = mem_stats['reserved_peak']
sys_peak = '?' if opts.memmon_poll_rate <= 0 else mem_stats['system_peak']
sys_total = mem_stats['total']
sys_pct = '?' if opts.memmon_poll_rate <= 0 else round(sys_peak/sys_total * 100, 2)
vram_tooltip = "Torch active: Peak amount of VRAM used by Torch during generation, excluding cached data.
" \
"Torch reserved: Peak amount of VRAM allocated by Torch, including all active and cached data.
" \
"Sys VRAM: Peak amount of VRAM allocation across all applications / total GPU VRAM (peak utilization%)."
vram_html = '' if opts.memmon_poll_rate == 0 else f"<p class='vram' title='{vram_tooltip}'>Torch active/reserved: {active_peak}/{reserved_peak} MiB, <wbr>Sys VRAM: {sys_peak}/{sys_total} MiB ({sys_pct}%)</p>"
# last item is always HTML
res[-1] += f"<div class='performance'><p class='time'>Time taken: <wbr>{elapsed:.2f}s</p>{vram_html}</div>"
shared.state.interrupted = False
return tuple(res)
return f
def check_progress_call():
if shared.state.job_count == 0:
return "", gr_show(False), gr_show(False)
progress = 0
if shared.state.job_count > 0:
progress += shared.state.job_no / shared.state.job_count
if shared.state.sampling_steps > 0:
progress += 1 / shared.state.job_count * shared.state.sampling_step / shared.state.sampling_steps
progress = min(progress, 1)
progressbar = ""
if opts.show_progressbar:
progressbar = f"""<div class='progressDiv'><div class='progress' style="width:{progress * 100}%">{str(int(progress*100))+"%" if progress > 0.01 else ""}</div></div>"""
image = gr_show(False)
preview_visibility = gr_show(False)
if opts.show_progress_every_n_steps > 0:
if shared.parallel_processing_allowed:
if shared.state.sampling_step - shared.state.current_image_sampling_step >= opts.show_progress_every_n_steps and shared.state.current_latent is not None:
shared.state.current_image = modules.sd_samplers.sample_to_image(shared.state.current_latent)
shared.state.current_image_sampling_step = shared.state.sampling_step
image = shared.state.current_image
if image is None or progress >= 1:
image = gr.update(value=None)
else:
preview_visibility = gr_show(True)
return f"<span style='display: none'>{time.time()}</span><p>{progressbar}</p>", preview_visibility, image
def check_progress_call_initial():
shared.state.job_count = -1
return check_progress_call()
def roll_artist(prompt):
allowed_cats = set([x for x in shared.artist_db.categories() if len(opts.random_artist_categories)==0 or x in opts.random_artist_categories])
artist = random.choice([x for x in shared.artist_db.artists if x.category in allowed_cats])
return prompt + ", " + artist.name if prompt != '' else artist.name
def visit(x, func, path=""):
if hasattr(x, 'children'):
for c in x.children:
visit(c, func, path)
elif x.label is not None:
func(path + "/" + str(x.label), x)
def create_seed_inputs():
with gr.Row():
seed = gr.Number(label='Seed', value=-1)
subseed = gr.Number(label='Variation seed', value=-1, visible=False)
seed_checkbox = gr.Checkbox(label="Extra", elem_id="subseed_show", value=False)
with gr.Row():
subseed_strength = gr.Slider(label='Variation strength', value=0.0, minimum=0, maximum=1, step=0.01, visible=False)
seed_resize_from_w = gr.Slider(minimum=0, maximum=2048, step=64, label="Resize seed from width", value=0, visible=False)
seed_resize_from_h = gr.Slider(minimum=0, maximum=2048, step=64, label="Resize seed from height", value=0, visible=False)
def change_visiblity(show):
return {
subseed: gr_show(show),
subseed_strength: gr_show(show),
seed_resize_from_h: gr_show(show),
seed_resize_from_w: gr_show(show),
}
seed_checkbox.change(
change_visiblity,
inputs=[seed_checkbox],
outputs=[
subseed,
subseed_strength,
seed_resize_from_h,
seed_resize_from_w
]
)
return seed, subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w
def add_style(name: str, prompt: str, negative_prompt: str):
if name is None:
return [gr_show(), gr_show()]
style = modules.styles.PromptStyle(name, prompt, negative_prompt)
shared.prompt_styles.styles[style.name] = style
# Save all loaded prompt styles: this allows us to update the storage format in the future more easily, because we
# reserialize all styles every time we save them
shared.prompt_styles.save_styles(shared.styles_filename)
update = {"visible": True, "choices": list(shared.prompt_styles.styles), "__type__": "update"}
return [update, update, update, update]
def apply_styles(prompt, prompt_neg, style1_name, style2_name):
prompt = shared.prompt_styles.apply_styles_to_prompt(prompt, [style1_name, style2_name])
prompt_neg = shared.prompt_styles.apply_negative_styles_to_prompt(prompt_neg, [style1_name, style2_name])
return [gr.Textbox.update(value=prompt), gr.Textbox.update(value=prompt_neg), gr.Dropdown.update(value="None"), gr.Dropdown.update(value="None")]
def interrogate(image):
prompt = shared.interrogator.interrogate(image)
return gr_show(True) if prompt is None else prompt
def create_toprow(is_img2img):
with gr.Row(elem_id="toprow"):
with gr.Column(scale=4):
with gr.Row():
with gr.Column(scale=8):
with gr.Row():
prompt = gr.Textbox(label="Prompt", elem_id="prompt", show_label=False, placeholder="Prompt", lines=2)
roll = gr.Button('Roll', elem_id="roll", visible=len(shared.artist_db.artists) > 0)
with gr.Column(scale=1, elem_id="style_pos_col"):
prompt_style = gr.Dropdown(label="Style 1", elem_id="style_index", choices=[k for k, v in shared.prompt_styles.styles.items()], value=next(iter(shared.prompt_styles.styles.keys())), visible=len(shared.prompt_styles.styles) > 1)
with gr.Row():
with gr.Column(scale=8):
negative_prompt = gr.Textbox(label="Negative prompt", elem_id="negative_prompt", show_label=False, placeholder="Negative prompt", lines=2)
with gr.Column(scale=1, elem_id="style_neg_col"):
prompt_style2 = gr.Dropdown(label="Style 2", elem_id="style2_index", choices=[k for k, v in shared.prompt_styles.styles.items()], value=next(iter(shared.prompt_styles.styles.keys())), visible=len(shared.prompt_styles.styles) > 1)
with gr.Column(scale=1):
with gr.Row():
submit = gr.Button('Generate', elem_id="generate", variant='primary')
with gr.Row():
if is_img2img:
interrogate = gr.Button('Interrogate', elem_id="interrogate")
else:
interrogate = None
prompt_style_apply = gr.Button('Apply style', elem_id="style_apply")
save_style = gr.Button('Create style', elem_id="style_create")
return prompt, roll, prompt_style, negative_prompt, prompt_style2, submit, interrogate, prompt_style_apply, save_style
def setup_progressbar(progressbar, preview):
check_progress = gr.Button('Check progress', elem_id="check_progress", visible=False)
check_progress.click(
fn=check_progress_call,
show_progress=False,
inputs=[],
outputs=[progressbar, preview, preview],
)
check_progress_initial = gr.Button('Check progress (first)', elem_id="check_progress_initial", visible=False)
check_progress_initial.click(
fn=check_progress_call_initial,
show_progress=False,
inputs=[],
outputs=[progressbar, preview, preview],
)
def create_ui(txt2img, img2img, run_extras, run_pnginfo):
with gr.Blocks(analytics_enabled=False) as txt2img_interface:
txt2img_prompt, roll, txt2img_prompt_style, txt2img_negative_prompt, txt2img_prompt_style2, submit, _, txt2img_prompt_style_apply, txt2img_save_style = create_toprow(is_img2img=False)
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
steps = gr.Slider(minimum=1, maximum=150, step=1, label="Sampling Steps", value=20)
sampler_index = gr.Radio(label='Sampling method', elem_id="txt2img_sampling", choices=[x.name for x in samplers], value=samplers[0].name, type="index")
with gr.Row():
restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1)
tiling = gr.Checkbox(label='Tiling', value=False)
with gr.Row():
batch_count = gr.Slider(minimum=1, maximum=cmd_opts.max_batch_count, step=1, label='Batch count', value=1)
batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1)
cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0)
with gr.Group():
width = gr.Slider(minimum=64, maximum=2048, step=64, label="Width", value=512)
height = gr.Slider(minimum=64, maximum=2048, step=64, label="Height", value=512)
seed, subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w = create_seed_inputs()
with gr.Group():
custom_inputs = modules.scripts.scripts_txt2img.setup_ui(is_img2img=False)
with gr.Column(variant='panel'):
progressbar = gr.HTML(elem_id="progressbar")
with gr.Group():
txt2img_preview = gr.Image(elem_id='txt2img_preview', visible=False)
txt2img_gallery = gr.Gallery(label='Output', elem_id='txt2img_gallery').style(grid=4)
setup_progressbar(progressbar, txt2img_preview)
with gr.Group():
with gr.Row():
save = gr.Button('Save')
send_to_img2img = gr.Button('Send to img2img')
send_to_inpaint = gr.Button('Send to inpaint')
send_to_extras = gr.Button('Send to extras')
interrupt = gr.Button('Interrupt')
with gr.Group():
html_info = gr.HTML()
generation_info = gr.Textbox(visible=False)
txt2img_args = dict(
fn=txt2img,
_js="submit",
inputs=[
txt2img_prompt,
txt2img_negative_prompt,
txt2img_prompt_style,
txt2img_prompt_style2,
steps,
sampler_index,
restore_faces,
tiling,
batch_count,
batch_size,
cfg_scale,
seed,
subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w,
height,
width,
] + custom_inputs,
outputs=[
txt2img_gallery,
generation_info,
html_info
],
show_progress=False,
)
txt2img_prompt.submit(**txt2img_args)
submit.click(**txt2img_args)
interrupt.click(
fn=lambda: shared.state.interrupt(),
inputs=[],
outputs=[],
)
save.click(
fn=wrap_gradio_call(save_files),
_js = "(x, y, z) => [x, y, selected_gallery_index()]",
inputs=[
generation_info,
txt2img_gallery,
html_info
],
outputs=[
html_info,
html_info,
html_info,
]
)
roll.click(
fn=roll_artist,
inputs=[
txt2img_prompt,
],
outputs=[
txt2img_prompt,
]
)
with gr.Blocks(analytics_enabled=False) as img2img_interface:
img2img_prompt, roll, img2img_prompt_style, img2img_negative_prompt, img2img_prompt_style2, submit, img2img_interrogate, img2img_prompt_style_apply, img2img_save_style = create_toprow(is_img2img=True)
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
with gr.Group():
switch_mode = gr.Radio(label='Mode', elem_id="img2img_mode", choices=['Redraw whole image', 'Inpaint a part of image', 'SD upscale'], value='Redraw whole image', type="index", show_label=False)
init_img = gr.Image(label="Image for img2img", source="upload", interactive=True, type="pil")
init_img_with_mask = gr.Image(label="Image for inpainting with mask", elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool="sketch", visible=False, image_mode="RGBA")
init_mask = gr.Image(label="Mask", source="upload", interactive=True, type="pil", visible=False)
init_img_with_mask_comment = gr.HTML(elem_id="mask_bug_info", value="<small>if the editor shows ERROR, switch to another tab and back, then to another img2img mode above and back</small>", visible=False)
with gr.Row():
resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", show_label=False, choices=["Just resize", "Crop and resize", "Resize and fill"], type="index", value="Just resize")
mask_mode = gr.Radio(label="Mask mode", show_label=False, choices=["Draw mask", "Upload mask"], type="index", value="Draw mask")
steps = gr.Slider(minimum=1, maximum=150, step=1, label="Sampling Steps", value=20)
sampler_index = gr.Radio(label='Sampling method', choices=[x.name for x in samplers_for_img2img], value=samplers_for_img2img[0].name, type="index")
mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4, visible=False)
inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='fill', type="index", visible=False)
with gr.Row():
inpaint_full_res = gr.Checkbox(label='Inpaint at full resolution', value=False, visible=False)
inpainting_mask_invert = gr.Radio(label='Masking mode', choices=['Inpaint masked', 'Inpaint not masked'], value='Inpaint masked', type="index", visible=False)
with gr.Row():
restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1)
tiling = gr.Checkbox(label='Tiling', value=False)
sd_upscale_overlap = gr.Slider(minimum=0, maximum=256, step=16, label='Tile overlap', value=64, visible=False)
with gr.Row():
sd_upscale_upscaler_name = gr.Radio(label='Upscaler', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index", visible=False)
with gr.Row():
batch_count = gr.Slider(minimum=1, maximum=cmd_opts.max_batch_count, step=1, label='Batch count', value=1)
batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1)
with gr.Group():
cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0)
denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75)
with gr.Group():
width = gr.Slider(minimum=64, maximum=2048, step=64, label="Width", value=512)
height = gr.Slider(minimum=64, maximum=2048, step=64, label="Height", value=512)
seed, subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w = create_seed_inputs()
with gr.Group():
custom_inputs = modules.scripts.scripts_img2img.setup_ui(is_img2img=True)
with gr.Column(variant='panel'):
progressbar = gr.HTML(elem_id="progressbar")
with gr.Group():
img2img_preview = gr.Image(elem_id='img2img_preview', visible=False)
img2img_gallery = gr.Gallery(label='Output', elem_id='img2img_gallery').style(grid=4)
setup_progressbar(progressbar, img2img_preview)
with gr.Group():
with gr.Row():
save = gr.Button('Save')
img2img_send_to_img2img = gr.Button('Send to img2img')
img2img_send_to_inpaint = gr.Button('Send to inpaint')
img2img_send_to_extras = gr.Button('Send to extras')
interrupt = gr.Button('Interrupt')
img2img_save_style = gr.Button('Save prompt as style')
with gr.Group():
html_info = gr.HTML()
generation_info = gr.Textbox(visible=False)
def apply_mode(mode, uploadmask):
is_classic = mode == 0
is_inpaint = mode == 1
is_upscale = mode == 2
return {
init_img: gr_show(not is_inpaint or (is_inpaint and uploadmask == 1)),
init_img_with_mask: gr_show(is_inpaint and uploadmask == 0),
init_img_with_mask_comment: gr_show(is_inpaint and uploadmask == 0),
init_mask: gr_show(is_inpaint and uploadmask == 1),
mask_mode: gr_show(is_inpaint),
mask_blur: gr_show(is_inpaint),
inpainting_fill: gr_show(is_inpaint),
sd_upscale_upscaler_name: gr_show(is_upscale),
sd_upscale_overlap: gr_show(is_upscale),
inpaint_full_res: gr_show(is_inpaint),
inpainting_mask_invert: gr_show(is_inpaint),
img2img_interrogate: gr_show(not is_inpaint),
}
switch_mode.change(
apply_mode,
inputs=[switch_mode, mask_mode],
outputs=[
init_img,
init_img_with_mask,
init_img_with_mask_comment,
init_mask,
mask_mode,
mask_blur,
inpainting_fill,
sd_upscale_upscaler_name,
sd_upscale_overlap,
inpaint_full_res,
inpainting_mask_invert,
img2img_interrogate,
]
)
mask_mode.change(
lambda mode: {
init_img: gr_show(mode == 1),
init_img_with_mask: gr_show(mode == 0),
init_mask: gr_show(mode == 1),
},
inputs=[mask_mode],
outputs=[
init_img,
init_img_with_mask,
init_mask,
],
)
img2img_args = dict(
fn=img2img,
_js="submit",
inputs=[
img2img_prompt,
img2img_negative_prompt,
img2img_prompt_style,
img2img_prompt_style2,
init_img,
init_img_with_mask,
init_mask,
mask_mode,
steps,
sampler_index,
mask_blur,
inpainting_fill,
restore_faces,
tiling,
switch_mode,
batch_count,
batch_size,
cfg_scale,
denoising_strength,
seed,
subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w,
height,
width,
resize_mode,
sd_upscale_upscaler_name,
sd_upscale_overlap,
inpaint_full_res,
inpainting_mask_invert,
] + custom_inputs,
outputs=[
img2img_gallery,
generation_info,
html_info
],
show_progress=False,
)
img2img_prompt.submit(**img2img_args)
submit.click(**img2img_args)
img2img_interrogate.click(
fn=interrogate,
inputs=[init_img],
outputs=[img2img_prompt],
)
interrupt.click(
fn=lambda: shared.state.interrupt(),
inputs=[],
outputs=[],
)
save.click(
fn=wrap_gradio_call(save_files),
_js="(x, y, z) => [x, y, selected_gallery_index()]",
inputs=[
generation_info,
img2img_gallery,
html_info
],
outputs=[
html_info,
html_info,
html_info,
]
)
roll.click(
fn=roll_artist,
inputs=[
img2img_prompt,
],
outputs=[
img2img_prompt,
]
)
prompts = [(txt2img_prompt, txt2img_negative_prompt), (img2img_prompt, img2img_negative_prompt)]
style_dropdowns = [(txt2img_prompt_style, txt2img_prompt_style2), (img2img_prompt_style, img2img_prompt_style2)]
dummy_component = gr.Label(visible=False)
for button, (prompt, negative_prompt) in zip([txt2img_save_style, img2img_save_style], prompts):
button.click(
fn=add_style,
_js="ask_for_style_name",
# Have to pass empty dummy component here, because the JavaScript and Python function have to accept
# the same number of parameters, but we only know the style-name after the JavaScript prompt
inputs=[dummy_component, prompt, negative_prompt],
outputs=[txt2img_prompt_style, img2img_prompt_style, txt2img_prompt_style2, img2img_prompt_style2],
)
for button, (prompt, negative_prompt), (style1, style2) in zip([txt2img_prompt_style_apply, img2img_prompt_style_apply], prompts, style_dropdowns):
button.click(
fn=apply_styles,
inputs=[prompt, negative_prompt, style1, style2],
outputs=[prompt, negative_prompt, style1, style2],
)
with gr.Blocks(analytics_enabled=False) as extras_interface:
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
with gr.Tabs():
with gr.TabItem('Single Image'):
image = gr.Image(label="Source", source="upload", interactive=True, type="pil")
with gr.TabItem('Batch Process'):
image_batch = gr.File(label="Batch Process", file_count="multiple", interactive=True, type="file")
upscaling_resize = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Resize", value=2)
with gr.Group():
extras_upscaler_1 = gr.Radio(label='Upscaler 1', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index")
with gr.Group():
extras_upscaler_2 = gr.Radio(label='Upscaler 2', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index")
extras_upscaler_2_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="Upscaler 2 visibility", value=1)
with gr.Group():
gfpgan_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="GFPGAN visibility", value=0, interactive=modules.gfpgan_model.have_gfpgan)
with gr.Group():
codeformer_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="CodeFormer visibility", value=0, interactive=modules.codeformer_model.have_codeformer)
codeformer_weight = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="CodeFormer weight (0 = maximum effect, 1 = minimum effect)", value=0, interactive=modules.codeformer_model.have_codeformer)
submit = gr.Button('Generate', elem_id="extras_generate", variant='primary')
with gr.Column(variant='panel'):
result_images = gr.Gallery(label="Result")
html_info_x = gr.HTML()
html_info = gr.HTML()
extras_args = dict(
fn=run_extras,
inputs=[
image,
image_batch,
gfpgan_visibility,
codeformer_visibility,
codeformer_weight,
upscaling_resize,
extras_upscaler_1,
extras_upscaler_2,
extras_upscaler_2_visibility,
],
outputs=[
result_images,
html_info_x,
html_info,
]
)
submit.click(**extras_args)
pnginfo_interface = gr.Interface(
wrap_gradio_call(run_pnginfo),
inputs=[
gr.Image(label="Source", source="upload", interactive=True, type="pil"),
],
outputs=[
gr.HTML(),
gr.HTML(),
gr.HTML(),
],
allow_flagging="never",
analytics_enabled=False,
)
def create_setting_component(key):
def fun():
return opts.data[key] if key in opts.data else opts.data_labels[key].default
info = opts.data_labels[key]
t = type(info.default)
args = info.component_args() if callable(info.component_args) else info.component_args
if info.component is not None:
comp = info.component
elif t == str:
comp = gr.Textbox
elif t == int:
comp = gr.Number
elif t == bool:
comp = gr.Checkbox
else:
raise Exception(f'bad options item type: {str(t)} for key {key}')
return comp(label=info.label, value=fun, **(args or {}))
components = []
keys = list(opts.data_labels.keys())
settings_cols = 3
items_per_col = math.ceil(len(keys) / settings_cols)
def run_settings(*args):
up = []
for key, value, comp in zip(opts.data_labels.keys(), args, components):
comp_args = opts.data_labels[key].component_args
if comp_args and isinstance(comp_args, dict) and comp_args.get('visible') is False:
continue
oldval = opts.data.get(key, None)
opts.data[key] = value
if oldval != value and opts.data_labels[key].onchange is not None:
opts.data_labels[key].onchange()
up.append(comp.update(value=value))
opts.save(shared.config_filename)
return 'Settings applied.'
with gr.Blocks(analytics_enabled=False) as settings_interface:
submit = gr.Button(value="Apply settings", variant='primary')
result = gr.HTML()
with gr.Row(elem_id="settings").style(equal_height=False):
for colno in range(settings_cols):
with gr.Column(variant='panel'):
for rowno in range(items_per_col):
index = rowno + colno * items_per_col
if index < len(keys):
components.append(create_setting_component(keys[index]))
submit.click(
fn=run_settings,
inputs=components,
outputs=[result]
)
interfaces = [
(txt2img_interface, "txt2img", "txt2img"),
(img2img_interface, "img2img", "img2img"),
(extras_interface, "Extras", "extras"),
(pnginfo_interface, "PNG Info", "pnginfo"),
(settings_interface, "Settings", "settings"),
]
with open(os.path.join(script_path, "style.css"), "r", encoding="utf8") as file:
css = file.read()
if os.path.exists(os.path.join(script_path, "user.css")):
with open(os.path.join(script_path, "user.css"), "r", encoding="utf8") as file:
usercss = file.read()
css += usercss
if not cmd_opts.no_progressbar_hiding:
css += css_hide_progressbar
with gr.Blocks(css=css, analytics_enabled=False, title="Stable Diffusion") as demo:
with gr.Tabs() as tabs:
for interface, label, ifid in interfaces:
with gr.TabItem(label, id=ifid):
interface.render()
tabs.change(
fn=lambda x: x,
inputs=[init_img_with_mask],
outputs=[init_img_with_mask],
)
send_to_img2img.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_img2img",
inputs=[txt2img_gallery],
outputs=[init_img],
)
send_to_inpaint.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_img2img",
inputs=[txt2img_gallery],
outputs=[init_img_with_mask],
)
img2img_send_to_img2img.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery",
inputs=[img2img_gallery],
outputs=[init_img],
)
img2img_send_to_inpaint.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery",
inputs=[img2img_gallery],
outputs=[init_img_with_mask],
)
send_to_extras.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_extras",
inputs=[txt2img_gallery],
outputs=[image],
)
img2img_send_to_extras.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_extras",
inputs=[img2img_gallery],
outputs=[image],
)
ui_config_file = cmd_opts.ui_config_file
ui_settings = {}
settings_count = len(ui_settings)
error_loading = False
try:
if os.path.exists(ui_config_file):
with open(ui_config_file, "r", encoding="utf8") as file:
ui_settings = json.load(file)
except Exception:
error_loading = True
print("Error loading settings:", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
def loadsave(path, x):
def apply_field(obj, field, condition=None):
key = path + "/" + field
saved_value = ui_settings.get(key, None)
if saved_value is None:
ui_settings[key] = getattr(obj, field)
elif condition is None or condition(saved_value):
setattr(obj, field, saved_value)
if type(x) == gr.Slider:
apply_field(x, 'value')
apply_field(x, 'minimum')
apply_field(x, 'maximum')
apply_field(x, 'step')
if type(x) == gr.Radio:
apply_field(x, 'value', lambda val: val in x.choices)
visit(txt2img_interface, loadsave, "txt2img")
visit(img2img_interface, loadsave, "img2img")
visit(extras_interface, loadsave, "extras")
if not error_loading and (not os.path.exists(ui_config_file) or settings_count != len(ui_settings)):
with open(ui_config_file, "w", encoding="utf8") as file:
json.dump(ui_settings, file, indent=4)
return demo
with open(os.path.join(script_path, "script.js"), "r", encoding="utf8") as jsfile:
javascript = f'<script>{jsfile.read()}</script>'
jsdir = os.path.join(script_path, "javascript")
for filename in os.listdir(jsdir):
with open(os.path.join(jsdir, filename), "r", encoding="utf8") as jsfile:
javascript += f"\n<script>{jsfile.read()}</script>"
def template_response(*args, **kwargs):
res = gradio_routes_templates_response(*args, **kwargs)
res.body = res.body.replace(b'</head>', f'{javascript}</head>'.encode("utf8"))
res.init_headers()
return res
gradio_routes_templates_response = gradio.routes.templates.TemplateResponse
gradio.routes.templates.TemplateResponse = template_response
<|code_end|>
|
modules/memmon.py
<|code_start|>import threading
import time
from collections import defaultdict
import torch
class MemUsageMonitor(threading.Thread):
run_flag = None
device = None
disabled = False
opts = None
data = None
def __init__(self, name, device, opts):
threading.Thread.__init__(self)
self.name = name
self.device = device
self.opts = opts
self.daemon = True
self.run_flag = threading.Event()
self.data = defaultdict(int)
try:
torch.cuda.mem_get_info()
torch.cuda.memory_stats(self.device)
except Exception as e: # AMD or whatever
print(f"Warning: caught exception '{e}', memory monitor disabled")
self.disabled = True
def run(self):
if self.disabled:
return
while True:
self.run_flag.wait()
torch.cuda.reset_peak_memory_stats()
self.data.clear()
if self.opts.memmon_poll_rate <= 0:
self.run_flag.clear()
continue
self.data["min_free"] = torch.cuda.mem_get_info()[0]
while self.run_flag.is_set():
free, total = torch.cuda.mem_get_info() # calling with self.device errors, torch bug?
self.data["min_free"] = min(self.data["min_free"], free)
time.sleep(1 / self.opts.memmon_poll_rate)
def dump_debug(self):
print(self, 'recorded data:')
for k, v in self.read().items():
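# -(v // -(1024 ** 2)) is ceiling division: convert bytes to MiB, rounded up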
print(k, -(v // -(1024 ** 2)))
print(self, 'raw torch memory stats:')
tm = torch.cuda.memory_stats(self.device)
for k, v in tm.items():
if 'bytes' not in k:
continue
print('\t' if 'peak' in k else '', k, -(v // -(1024 ** 2)))
print(torch.cuda.memory_summary())
def monitor(self):
self.run_flag.set()
def read(self):
if not self.disabled:
free, total = torch.cuda.mem_get_info()
self.data["total"] = total
torch_stats = torch.cuda.memory_stats(self.device)
self.data["active_peak"] = torch_stats["active_bytes.all.peak"]
self.data["reserved_peak"] = torch_stats["reserved_bytes.all.peak"]
self.data["system_peak"] = total - self.data["min_free"]
return self.data
def stop(self):
self.run_flag.clear()
return self.read()
<|code_end|>
modules/ui.py
<|code_start|>import base64
import html
import io
import json
import math
import mimetypes
import os
import random
import sys
import time
import traceback
import numpy as np
import torch
from PIL import Image
import gradio as gr
import gradio.utils
import gradio.routes
from modules.paths import script_path
from modules.shared import opts, cmd_opts
import modules.shared as shared
from modules.sd_samplers import samplers, samplers_for_img2img
import modules.realesrgan_model as realesrgan
import modules.scripts
import modules.gfpgan_model
import modules.codeformer_model
import modules.styles
# this is a fix for Windows users. Without it, javascript files will be served with text/html content-type and the browser will not show any UI
mimetypes.init()
mimetypes.add_type('application/javascript', '.js')
if not cmd_opts.share and not cmd_opts.listen:
# fix gradio phoning home
gradio.utils.version_check = lambda: None
gradio.utils.get_local_ip_address = lambda: '127.0.0.1'
def gr_show(visible=True):
return {"visible": visible, "__type__": "update"}
sample_img2img = "assets/stable-samples/img2img/sketch-mountains-input.jpg"
sample_img2img = sample_img2img if os.path.exists(sample_img2img) else None
css_hide_progressbar = """
.wrap .m-12 svg { display:none!important; }
.wrap .m-12::before { content:"Loading..." }
.progress-bar { display:none!important; }
.meta-text { display:none!important; }
"""
def plaintext_to_html(text):
text = "<p>" + "<br>\n".join([f"{html.escape(x)}" for x in text.split('\n')]) + "</p>"
return text
def image_from_url_text(filedata):
if type(filedata) == list:
if len(filedata) == 0:
return None
filedata = filedata[0]
if filedata.startswith("data:image/png;base64,"):
filedata = filedata[len("data:image/png;base64,"):]
filedata = base64.decodebytes(filedata.encode('utf-8'))
image = Image.open(io.BytesIO(filedata))
return image
def send_gradio_gallery_to_image(x):
if len(x) == 0:
return None
return image_from_url_text(x[0])
def save_files(js_data, images, index):
import csv
os.makedirs(opts.outdir_save, exist_ok=True)
filenames = []
data = json.loads(js_data)
if index > -1 and opts.save_selected_only and (index > 0 or not opts.return_grid): # ensures we are looking at a specific non-grid picture, and we have save_selected_only
images = [images[index]]
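# when a grid is returned it occupies gallery index 0, so the selected image's seed offset is index - 1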
data["seed"] += (index - 1 if opts.return_grid else index)
with open(os.path.join(opts.outdir_save, "log.csv"), "a", encoding="utf8", newline='') as file:
at_start = file.tell() == 0
writer = csv.writer(file)
if at_start:
writer.writerow(["prompt", "seed", "width", "height", "sampler", "cfgs", "steps", "filename", "negative_prompt"])
filename_base = str(int(time.time() * 1000))
for i, filedata in enumerate(images):
filename = filename_base + ("" if len(images) == 1 else "-" + str(i + 1)) + ".png"
filepath = os.path.join(opts.outdir_save, filename)
if filedata.startswith("data:image/png;base64,"):
filedata = filedata[len("data:image/png;base64,"):]
with open(filepath, "wb") as imgfile:
imgfile.write(base64.decodebytes(filedata.encode('utf-8')))
filenames.append(filename)
writer.writerow([data["prompt"], data["seed"], data["width"], data["height"], data["sampler"], data["cfg_scale"], data["steps"], filenames[0], data["negative_prompt"]])
return '', '', plaintext_to_html(f"Saved: {filenames[0]}")
def wrap_gradio_call(func):
def f(*args, **kwargs):
run_memmon = opts.memmon_poll_rate > 0 and not shared.mem_mon.disabled
if run_memmon:
shared.mem_mon.monitor()
t = time.perf_counter()
try:
res = list(func(*args, **kwargs))
except Exception as e:
print("Error completing request", file=sys.stderr)
print("Arguments:", args, kwargs, file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
shared.state.job = ""
shared.state.job_count = 0
res = [None, '', f"<div class='error'>{plaintext_to_html(type(e).__name__+': '+str(e))}</div>"]
elapsed = time.perf_counter() - t
if run_memmon:
mem_stats = {k: -(v//-(1024*1024)) for k, v in shared.mem_mon.stop().items()}
active_peak = mem_stats['active_peak']
reserved_peak = mem_stats['reserved_peak']
sys_peak = mem_stats['system_peak']
sys_total = mem_stats['total']
sys_pct = round(sys_peak/max(sys_total, 1) * 100, 2)
vram_tooltip = "Torch active: Peak amount of VRAM used by Torch during generation, excluding cached data.
" \
"Torch reserved: Peak amount of VRAM allocated by Torch, including all active and cached data.
" \
"Sys VRAM: Peak amount of VRAM allocation across all applications / total GPU VRAM (peak utilization%)."
vram_html = f"<p class='vram' title='{vram_tooltip}'>Torch active/reserved: {active_peak}/{reserved_peak} MiB, <wbr>Sys VRAM: {sys_peak}/{sys_total} MiB ({sys_pct}%)</p>"
else:
vram_html = ''
# last item is always HTML
res[-1] += f"<div class='performance'><p class='time'>Time taken: <wbr>{elapsed:.2f}s</p>{vram_html}</div>"
shared.state.interrupted = False
return tuple(res)
return f
def check_progress_call():
if shared.state.job_count == 0:
return "", gr_show(False), gr_show(False)
progress = 0
if shared.state.job_count > 0:
progress += shared.state.job_no / shared.state.job_count
if shared.state.sampling_steps > 0:
progress += 1 / shared.state.job_count * shared.state.sampling_step / shared.state.sampling_steps
progress = min(progress, 1)
progressbar = ""
if opts.show_progressbar:
progressbar = f"""<div class='progressDiv'><div class='progress' style="width:{progress * 100}%">{str(int(progress*100))+"%" if progress > 0.01 else ""}</div></div>"""
image = gr_show(False)
preview_visibility = gr_show(False)
if opts.show_progress_every_n_steps > 0:
if shared.parallel_processing_allowed:
if shared.state.sampling_step - shared.state.current_image_sampling_step >= opts.show_progress_every_n_steps and shared.state.current_latent is not None:
shared.state.current_image = modules.sd_samplers.sample_to_image(shared.state.current_latent)
shared.state.current_image_sampling_step = shared.state.sampling_step
image = shared.state.current_image
if image is None or progress >= 1:
image = gr.update(value=None)
else:
preview_visibility = gr_show(True)
return f"<span style='display: none'>{time.time()}</span><p>{progressbar}</p>", preview_visibility, image
def roll_artist(prompt):
allowed_cats = set([x for x in shared.artist_db.categories() if len(opts.random_artist_categories)==0 or x in opts.random_artist_categories])
artist = random.choice([x for x in shared.artist_db.artists if x.category in allowed_cats])
return prompt + ", " + artist.name if prompt != '' else artist.name
def visit(x, func, path=""):
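# recursively walk a gradio block's children and call func(path, component) on every labelled leaf component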
if hasattr(x, 'children'):
for c in x.children:
visit(c, func, path)
elif x.label is not None:
func(path + "/" + str(x.label), x)
def create_seed_inputs():
with gr.Row():
seed = gr.Number(label='Seed', value=-1)
subseed = gr.Number(label='Variation seed', value=-1, visible=False)
seed_checkbox = gr.Checkbox(label="Extra", elem_id="subseed_show", value=False)
with gr.Row():
subseed_strength = gr.Slider(label='Variation strength', value=0.0, minimum=0, maximum=1, step=0.01, visible=False)
seed_resize_from_w = gr.Slider(minimum=0, maximum=2048, step=64, label="Resize seed from width", value=0, visible=False)
seed_resize_from_h = gr.Slider(minimum=0, maximum=2048, step=64, label="Resize seed from height", value=0, visible=False)
def change_visiblity(show):
return {
subseed: gr_show(show),
subseed_strength: gr_show(show),
seed_resize_from_h: gr_show(show),
seed_resize_from_w: gr_show(show),
}
seed_checkbox.change(
change_visiblity,
inputs=[seed_checkbox],
outputs=[
subseed,
subseed_strength,
seed_resize_from_h,
seed_resize_from_w
]
)
return seed, subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w
def add_style(name: str, prompt: str, negative_prompt: str):
if name is None:
return [gr_show(), gr_show()]
style = modules.styles.PromptStyle(name, prompt, negative_prompt)
shared.prompt_styles.styles[style.name] = style
# Save all loaded prompt styles: this allows us to update the storage format in the future more easily, because we
# reserialize all styles every time we save them
shared.prompt_styles.save_styles(shared.styles_filename)
update = {"visible": True, "choices": list(shared.prompt_styles.styles), "__type__": "update"}
return [update, update, update, update]
def apply_styles(prompt, prompt_neg, style1_name, style2_name):
prompt = shared.prompt_styles.apply_styles_to_prompt(prompt, [style1_name, style2_name])
prompt_neg = shared.prompt_styles.apply_negative_styles_to_prompt(prompt_neg, [style1_name, style2_name])
return [gr.Textbox.update(value=prompt), gr.Textbox.update(value=prompt_neg), gr.Dropdown.update(value="None"), gr.Dropdown.update(value="None")]
def interrogate(image):
prompt = shared.interrogator.interrogate(image)
return gr_show(True) if prompt is None else prompt
def create_toprow(is_img2img):
with gr.Row(elem_id="toprow"):
with gr.Column(scale=4):
with gr.Row():
with gr.Column(scale=8):
with gr.Row():
prompt = gr.Textbox(label="Prompt", elem_id="prompt", show_label=False, placeholder="Prompt", lines=2)
roll = gr.Button('Roll', elem_id="roll", visible=len(shared.artist_db.artists) > 0)
with gr.Column(scale=1, elem_id="style_pos_col"):
prompt_style = gr.Dropdown(label="Style 1", elem_id="style_index", choices=[k for k, v in shared.prompt_styles.styles.items()], value=next(iter(shared.prompt_styles.styles.keys())), visible=len(shared.prompt_styles.styles) > 1)
with gr.Row():
with gr.Column(scale=8):
negative_prompt = gr.Textbox(label="Negative prompt", elem_id="negative_prompt", show_label=False, placeholder="Negative prompt", lines=2)
with gr.Column(scale=1, elem_id="style_neg_col"):
prompt_style2 = gr.Dropdown(label="Style 2", elem_id="style2_index", choices=[k for k, v in shared.prompt_styles.styles.items()], value=next(iter(shared.prompt_styles.styles.keys())), visible=len(shared.prompt_styles.styles) > 1)
with gr.Column(scale=1):
with gr.Row():
submit = gr.Button('Generate', elem_id="generate", variant='primary')
with gr.Row():
if is_img2img:
interrogate = gr.Button('Interrogate', elem_id="interrogate")
else:
interrogate = None
prompt_style_apply = gr.Button('Apply style', elem_id="style_apply")
save_style = gr.Button('Create style', elem_id="style_create")
check_progress = gr.Button('Check progress', elem_id="check_progress", visible=False)
return prompt, roll, prompt_style, negative_prompt, prompt_style2, submit, interrogate, prompt_style_apply, save_style, check_progress
def create_ui(txt2img, img2img, run_extras, run_pnginfo):
with gr.Blocks(analytics_enabled=False) as txt2img_interface:
txt2img_prompt, roll, txt2img_prompt_style, txt2img_negative_prompt, txt2img_prompt_style2, submit, _, txt2img_prompt_style_apply, txt2img_save_style, check_progress = create_toprow(is_img2img=False)
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
steps = gr.Slider(minimum=1, maximum=150, step=1, label="Sampling Steps", value=20)
sampler_index = gr.Radio(label='Sampling method', elem_id="txt2img_sampling", choices=[x.name for x in samplers], value=samplers[0].name, type="index")
with gr.Row():
restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1)
tiling = gr.Checkbox(label='Tiling', value=False)
with gr.Row():
batch_count = gr.Slider(minimum=1, maximum=cmd_opts.max_batch_count, step=1, label='Batch count', value=1)
batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1)
cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0)
with gr.Group():
width = gr.Slider(minimum=64, maximum=2048, step=64, label="Width", value=512)
height = gr.Slider(minimum=64, maximum=2048, step=64, label="Height", value=512)
seed, subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w = create_seed_inputs()
with gr.Group():
custom_inputs = modules.scripts.scripts_txt2img.setup_ui(is_img2img=False)
with gr.Column(variant='panel'):
progressbar = gr.HTML(elem_id="progressbar")
with gr.Group():
txt2img_preview = gr.Image(elem_id='txt2img_preview', visible=False)
txt2img_gallery = gr.Gallery(label='Output', elem_id='txt2img_gallery').style(grid=4)
with gr.Group():
with gr.Row():
save = gr.Button('Save')
send_to_img2img = gr.Button('Send to img2img')
send_to_inpaint = gr.Button('Send to inpaint')
send_to_extras = gr.Button('Send to extras')
interrupt = gr.Button('Interrupt')
with gr.Group():
html_info = gr.HTML()
generation_info = gr.Textbox(visible=False)
txt2img_args = dict(
fn=txt2img,
_js="submit",
inputs=[
txt2img_prompt,
txt2img_negative_prompt,
txt2img_prompt_style,
txt2img_prompt_style2,
steps,
sampler_index,
restore_faces,
tiling,
batch_count,
batch_size,
cfg_scale,
seed,
subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w,
height,
width,
] + custom_inputs,
outputs=[
txt2img_gallery,
generation_info,
html_info
]
)
txt2img_prompt.submit(**txt2img_args)
submit.click(**txt2img_args)
check_progress.click(
fn=check_progress_call,
show_progress=False,
inputs=[],
outputs=[progressbar, txt2img_preview, txt2img_preview],
)
interrupt.click(
fn=lambda: shared.state.interrupt(),
inputs=[],
outputs=[],
)
save.click(
fn=wrap_gradio_call(save_files),
_js = "(x, y, z) => [x, y, selected_gallery_index()]",
inputs=[
generation_info,
txt2img_gallery,
html_info
],
outputs=[
html_info,
html_info,
html_info,
]
)
roll.click(
fn=roll_artist,
inputs=[
txt2img_prompt,
],
outputs=[
txt2img_prompt,
]
)
with gr.Blocks(analytics_enabled=False) as img2img_interface:
img2img_prompt, roll, img2img_prompt_style, img2img_negative_prompt, img2img_prompt_style2, submit, img2img_interrogate, img2img_prompt_style_apply, img2img_save_style, check_progress = create_toprow(is_img2img=True)
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
with gr.Group():
switch_mode = gr.Radio(label='Mode', elem_id="img2img_mode", choices=['Redraw whole image', 'Inpaint a part of image', 'SD upscale'], value='Redraw whole image', type="index", show_label=False)
init_img = gr.Image(label="Image for img2img", source="upload", interactive=True, type="pil")
init_img_with_mask = gr.Image(label="Image for inpainting with mask", elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool="sketch", visible=False, image_mode="RGBA")
init_mask = gr.Image(label="Mask", source="upload", interactive=True, type="pil", visible=False)
init_img_with_mask_comment = gr.HTML(elem_id="mask_bug_info", value="<small>if the editor shows ERROR, switch to another tab and back, then to another img2img mode above and back</small>", visible=False)
with gr.Row():
resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", show_label=False, choices=["Just resize", "Crop and resize", "Resize and fill"], type="index", value="Just resize")
mask_mode = gr.Radio(label="Mask mode", show_label=False, choices=["Draw mask", "Upload mask"], type="index", value="Draw mask")
steps = gr.Slider(minimum=1, maximum=150, step=1, label="Sampling Steps", value=20)
sampler_index = gr.Radio(label='Sampling method', choices=[x.name for x in samplers_for_img2img], value=samplers_for_img2img[0].name, type="index")
mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4, visible=False)
inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='fill', type="index", visible=False)
with gr.Row():
inpaint_full_res = gr.Checkbox(label='Inpaint at full resolution', value=False, visible=False)
inpainting_mask_invert = gr.Radio(label='Masking mode', choices=['Inpaint masked', 'Inpaint not masked'], value='Inpaint masked', type="index", visible=False)
with gr.Row():
restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1)
tiling = gr.Checkbox(label='Tiling', value=False)
sd_upscale_overlap = gr.Slider(minimum=0, maximum=256, step=16, label='Tile overlap', value=64, visible=False)
with gr.Row():
sd_upscale_upscaler_name = gr.Radio(label='Upscaler', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index", visible=False)
with gr.Row():
batch_count = gr.Slider(minimum=1, maximum=cmd_opts.max_batch_count, step=1, label='Batch count', value=1)
batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1)
with gr.Group():
cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0)
denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75)
with gr.Group():
width = gr.Slider(minimum=64, maximum=2048, step=64, label="Width", value=512)
height = gr.Slider(minimum=64, maximum=2048, step=64, label="Height", value=512)
seed, subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w = create_seed_inputs()
with gr.Group():
custom_inputs = modules.scripts.scripts_img2img.setup_ui(is_img2img=True)
with gr.Column(variant='panel'):
progressbar = gr.HTML(elem_id="progressbar")
with gr.Group():
img2img_preview = gr.Image(elem_id='img2img_preview', visible=False)
img2img_gallery = gr.Gallery(label='Output', elem_id='img2img_gallery').style(grid=4)
with gr.Group():
with gr.Row():
save = gr.Button('Save')
img2img_send_to_img2img = gr.Button('Send to img2img')
img2img_send_to_inpaint = gr.Button('Send to inpaint')
img2img_send_to_extras = gr.Button('Send to extras')
interrupt = gr.Button('Interrupt')
img2img_save_style = gr.Button('Save prompt as style')
with gr.Group():
html_info = gr.HTML()
generation_info = gr.Textbox(visible=False)
def apply_mode(mode, uploadmask):
is_classic = mode == 0
is_inpaint = mode == 1
is_upscale = mode == 2
return {
init_img: gr_show(not is_inpaint or (is_inpaint and uploadmask == 1)),
init_img_with_mask: gr_show(is_inpaint and uploadmask == 0),
init_img_with_mask_comment: gr_show(is_inpaint and uploadmask == 0),
init_mask: gr_show(is_inpaint and uploadmask == 1),
mask_mode: gr_show(is_inpaint),
mask_blur: gr_show(is_inpaint),
inpainting_fill: gr_show(is_inpaint),
sd_upscale_upscaler_name: gr_show(is_upscale),
sd_upscale_overlap: gr_show(is_upscale),
inpaint_full_res: gr_show(is_inpaint),
inpainting_mask_invert: gr_show(is_inpaint),
img2img_interrogate: gr_show(not is_inpaint),
}
switch_mode.change(
apply_mode,
inputs=[switch_mode, mask_mode],
outputs=[
init_img,
init_img_with_mask,
init_img_with_mask_comment,
init_mask,
mask_mode,
mask_blur,
inpainting_fill,
sd_upscale_upscaler_name,
sd_upscale_overlap,
inpaint_full_res,
inpainting_mask_invert,
img2img_interrogate,
]
)
mask_mode.change(
lambda mode: {
init_img: gr_show(mode == 1),
init_img_with_mask: gr_show(mode == 0),
init_mask: gr_show(mode == 1),
},
inputs=[mask_mode],
outputs=[
init_img,
init_img_with_mask,
init_mask,
],
)
img2img_args = dict(
fn=img2img,
_js="submit",
inputs=[
img2img_prompt,
img2img_negative_prompt,
img2img_prompt_style,
img2img_prompt_style2,
init_img,
init_img_with_mask,
init_mask,
mask_mode,
steps,
sampler_index,
mask_blur,
inpainting_fill,
restore_faces,
tiling,
switch_mode,
batch_count,
batch_size,
cfg_scale,
denoising_strength,
seed,
subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w,
height,
width,
resize_mode,
sd_upscale_upscaler_name,
sd_upscale_overlap,
inpaint_full_res,
inpainting_mask_invert,
] + custom_inputs,
outputs=[
img2img_gallery,
generation_info,
html_info
]
)
img2img_prompt.submit(**img2img_args)
submit.click(**img2img_args)
img2img_interrogate.click(
fn=interrogate,
inputs=[init_img],
outputs=[img2img_prompt],
)
check_progress.click(
fn=check_progress_call,
show_progress=False,
inputs=[],
outputs=[progressbar, img2img_preview, img2img_preview],
)
interrupt.click(
fn=lambda: shared.state.interrupt(),
inputs=[],
outputs=[],
)
save.click(
fn=wrap_gradio_call(save_files),
_js = "(x, y, z) => [x, y, selected_gallery_index()]",
inputs=[
generation_info,
img2img_gallery,
html_info
],
outputs=[
html_info,
html_info,
html_info,
]
)
roll.click(
fn=roll_artist,
inputs=[
img2img_prompt,
],
outputs=[
img2img_prompt,
]
)
prompts = [(txt2img_prompt, txt2img_negative_prompt), (img2img_prompt, img2img_negative_prompt)]
style_dropdowns = [(txt2img_prompt_style, txt2img_prompt_style2), (img2img_prompt_style, img2img_prompt_style2)]
dummy_component = gr.Label(visible=False)
for button, (prompt, negative_prompt) in zip([txt2img_save_style, img2img_save_style], prompts):
button.click(
fn=add_style,
_js="ask_for_style_name",
# Have to pass empty dummy component here, because the JavaScript and Python function have to accept
# the same number of parameters, but we only know the style-name after the JavaScript prompt
inputs=[dummy_component, prompt, negative_prompt],
outputs=[txt2img_prompt_style, img2img_prompt_style, txt2img_prompt_style2, img2img_prompt_style2],
)
for button, (prompt, negative_prompt), (style1, style2) in zip([txt2img_prompt_style_apply, img2img_prompt_style_apply], prompts, style_dropdowns):
button.click(
fn=apply_styles,
inputs=[prompt, negative_prompt, style1, style2],
outputs=[prompt, negative_prompt, style1, style2],
)
with gr.Blocks(analytics_enabled=False) as extras_interface:
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
with gr.Tabs():
with gr.TabItem('Single Image'):
image = gr.Image(label="Source", source="upload", interactive=True, type="pil")
with gr.TabItem('Batch Process'):
image_batch = gr.File(label="Batch Process", file_count="multiple", interactive=True, type="file")
upscaling_resize = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Resize", value=2)
with gr.Group():
extras_upscaler_1 = gr.Radio(label='Upscaler 1', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index")
with gr.Group():
extras_upscaler_2 = gr.Radio(label='Upscaler 2', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index")
extras_upscaler_2_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="Upscaler 2 visibility", value=1)
with gr.Group():
gfpgan_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="GFPGAN visibility", value=0, interactive=modules.gfpgan_model.have_gfpgan)
with gr.Group():
codeformer_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="CodeFormer visibility", value=0, interactive=modules.codeformer_model.have_codeformer)
codeformer_weight = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="CodeFormer weight (0 = maximum effect, 1 = minimum effect)", value=0, interactive=modules.codeformer_model.have_codeformer)
submit = gr.Button('Generate', elem_id="extras_generate", variant='primary')
with gr.Column(variant='panel'):
result_images = gr.Gallery(label="Result")
html_info_x = gr.HTML()
html_info = gr.HTML()
extras_args = dict(
fn=run_extras,
inputs=[
image,
image_batch,
gfpgan_visibility,
codeformer_visibility,
codeformer_weight,
upscaling_resize,
extras_upscaler_1,
extras_upscaler_2,
extras_upscaler_2_visibility,
],
outputs=[
result_images,
html_info_x,
html_info,
]
)
submit.click(**extras_args)
pnginfo_interface = gr.Interface(
wrap_gradio_call(run_pnginfo),
inputs=[
gr.Image(label="Source", source="upload", interactive=True, type="pil"),
],
outputs=[
gr.HTML(),
gr.HTML(),
gr.HTML(),
],
allow_flagging="never",
analytics_enabled=False,
)
def create_setting_component(key):
def fun():
return opts.data[key] if key in opts.data else opts.data_labels[key].default
info = opts.data_labels[key]
t = type(info.default)
args = info.component_args() if callable(info.component_args) else info.component_args
if info.component is not None:
comp = info.component
elif t == str:
comp = gr.Textbox
elif t == int:
comp = gr.Number
elif t == bool:
comp = gr.Checkbox
else:
raise Exception(f'bad options item type: {str(t)} for key {key}')
return comp(label=info.label, value=fun, **(args or {}))
components = []
keys = list(opts.data_labels.keys())
settings_cols = 3
items_per_col = math.ceil(len(keys) / settings_cols)
def run_settings(*args):
up = []
for key, value, comp in zip(opts.data_labels.keys(), args, components):
comp_args = opts.data_labels[key].component_args
if comp_args and isinstance(comp_args, dict) and comp_args.get('visible') is False:
continue
oldval = opts.data.get(key, None)
opts.data[key] = value
if oldval != value and opts.data_labels[key].onchange is not None:
opts.data_labels[key].onchange()
up.append(comp.update(value=value))
opts.save(shared.config_filename)
return 'Settings applied.'
with gr.Blocks(analytics_enabled=False) as settings_interface:
submit = gr.Button(value="Apply settings", variant='primary')
result = gr.HTML()
with gr.Row(elem_id="settings").style(equal_height=False):
for colno in range(settings_cols):
with gr.Column(variant='panel'):
for rowno in range(items_per_col):
index = rowno + colno * items_per_col
if index < len(keys):
components.append(create_setting_component(keys[index]))
submit.click(
fn=run_settings,
inputs=components,
outputs=[result]
)
interfaces = [
(txt2img_interface, "txt2img", "txt2img"),
(img2img_interface, "img2img", "img2img"),
(extras_interface, "Extras", "extras"),
(pnginfo_interface, "PNG Info", "pnginfo"),
(settings_interface, "Settings", "settings"),
]
with open(os.path.join(script_path, "style.css"), "r", encoding="utf8") as file:
css = file.read()
if os.path.exists(os.path.join(script_path, "user.css")):
with open(os.path.join(script_path, "user.css"), "r", encoding="utf8") as file:
usercss = file.read()
css += usercss
if not cmd_opts.no_progressbar_hiding:
css += css_hide_progressbar
with gr.Blocks(css=css, analytics_enabled=False, title="Stable Diffusion") as demo:
with gr.Tabs() as tabs:
for interface, label, ifid in interfaces:
with gr.TabItem(label, id=ifid):
interface.render()
tabs.change(
fn=lambda x: x,
inputs=[init_img_with_mask],
outputs=[init_img_with_mask],
)
send_to_img2img.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_img2img",
inputs=[txt2img_gallery],
outputs=[init_img],
)
send_to_inpaint.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_img2img",
inputs=[txt2img_gallery],
outputs=[init_img_with_mask],
)
img2img_send_to_img2img.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery",
inputs=[img2img_gallery],
outputs=[init_img],
)
img2img_send_to_inpaint.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery",
inputs=[img2img_gallery],
outputs=[init_img_with_mask],
)
send_to_extras.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_extras",
inputs=[txt2img_gallery],
outputs=[image],
)
img2img_send_to_extras.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_extras",
inputs=[img2img_gallery],
outputs=[image],
)
ui_config_file = cmd_opts.ui_config_file
ui_settings = {}
settings_count = len(ui_settings)
error_loading = False
try:
if os.path.exists(ui_config_file):
with open(ui_config_file, "r", encoding="utf8") as file:
ui_settings = json.load(file)
except Exception:
error_loading = True
print("Error loading settings:", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
def loadsave(path, x):
def apply_field(obj, field, condition=None):
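# first run: record the component's current value as the saved default; later runs: restore the saved value if the optional condition accepts it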
key = path + "/" + field
saved_value = ui_settings.get(key, None)
if saved_value is None:
ui_settings[key] = getattr(obj, field)
elif condition is None or condition(saved_value):
setattr(obj, field, saved_value)
if type(x) == gr.Slider:
apply_field(x, 'value')
apply_field(x, 'minimum')
apply_field(x, 'maximum')
apply_field(x, 'step')
if type(x) == gr.Radio:
apply_field(x, 'value', lambda val: val in x.choices)
visit(txt2img_interface, loadsave, "txt2img")
visit(img2img_interface, loadsave, "img2img")
visit(extras_interface, loadsave, "extras")
if not error_loading and (not os.path.exists(ui_config_file) or settings_count != len(ui_settings)):
with open(ui_config_file, "w", encoding="utf8") as file:
json.dump(ui_settings, file, indent=4)
return demo
with open(os.path.join(script_path, "script.js"), "r", encoding="utf8") as jsfile:
javascript = f'<script>{jsfile.read()}</script>'
jsdir = os.path.join(script_path, "javascript")
for filename in os.listdir(jsdir):
with open(os.path.join(jsdir, filename), "r", encoding="utf8") as jsfile:
javascript += f"\n<script>{jsfile.read()}</script>"
def template_response(*args, **kwargs):
res = gradio_routes_templates_response(*args, **kwargs)
res.body = res.body.replace(b'</head>', f'{javascript}</head>'.encode("utf8"))
res.init_headers()
return res
gradio_routes_templates_response = gradio.routes.templates.TemplateResponse
gradio.routes.templates.TemplateResponse = template_response
<|code_end|>
|
[Feature Request] Automatic link opening
A command-line parameter that automatically opens the UI link in the browser on launch would be very convenient!
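A minimal sketch of how such an option could be wired up, assuming a hypothetical `--autolaunch` flag and the standard-library `webbrowser` module (the flag name and the `launch_browser` helper are illustrative only, not existing webui code):
```python
import argparse
import webbrowser

parser = argparse.ArgumentParser()
# hypothetical flag; the real option name and wiring would live in the webui's own argument parser
parser.add_argument("--autolaunch", action="store_true",
                    help="open the UI URL in the default browser once the server is up")
cmd_opts = parser.parse_args()

def launch_browser(url="http://127.0.0.1:7860"):
    # only open the page automatically when the user asked for it
    if cmd_opts.autolaunch:
        webbrowser.open(url)

launch_browser()
```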
|
modules/shared.py
<|code_start|>import sys
import argparse
import json
import os
import gradio as gr
import torch
import tqdm
import modules.artists
from modules.paths import script_path, sd_path
from modules.devices import get_optimal_device
import modules.styles
import modules.interrogate
import modules.memmon
import modules.sd_models
sd_model_file = os.path.join(script_path, 'model.ckpt')
default_sd_model_file = sd_model_file
parser = argparse.ArgumentParser()
parser.add_argument("--config", type=str, default=os.path.join(sd_path, "configs/stable-diffusion/v1-inference.yaml"), help="path to config which constructs model",)
parser.add_argument("--ckpt", type=str, default=sd_model_file, help="path to checkpoint of stable diffusion model; this checkpoint will be added to the list of checkpoints and loaded by default if you don't have a checkpoint selected in settings",)
parser.add_argument("--ckpt-dir", type=str, default=os.path.join(script_path, 'models'), help="path to directory with stable diffusion checkpoints",)
parser.add_argument("--gfpgan-dir", type=str, help="GFPGAN directory", default=('./src/gfpgan' if os.path.exists('./src/gfpgan') else './GFPGAN'))
parser.add_argument("--gfpgan-model", type=str, help="GFPGAN model file name", default='GFPGANv1.3.pth')
parser.add_argument("--no-half", action='store_true', help="do not switch the model to 16-bit floats")
parser.add_argument("--no-progressbar-hiding", action='store_true', help="do not hide progressbar in gradio UI (we hide it because it slows down ML if you have hardware acceleration in browser)")
parser.add_argument("--max-batch-count", type=int, default=16, help="maximum batch count value for the UI")
parser.add_argument("--embeddings-dir", type=str, default=os.path.join(script_path, 'embeddings'), help="embeddings directory for textual inversion (default: embeddings)")
parser.add_argument("--allow-code", action='store_true', help="allow custom script execution from webui")
parser.add_argument("--medvram", action='store_true', help="enable stable diffusion model optimizations for sacrificing a little speed for low VRM usage")
parser.add_argument("--lowvram", action='store_true', help="enable stable diffusion model optimizations for sacrificing a lot of speed for very low VRM usage")
parser.add_argument("--always-batch-cond-uncond", action='store_true', help="disables cond/uncond batching that is enabled to save memory with --medvram or --lowvram")
parser.add_argument("--unload-gfpgan", action='store_true', help="does not do anything.")
parser.add_argument("--precision", type=str, help="evaluate at this precision", choices=["full", "autocast"], default="autocast")
parser.add_argument("--share", action='store_true', help="use share=True for gradio and make the UI accessible through their site (doesn't work for me but you might have better luck)")
parser.add_argument("--esrgan-models-path", type=str, help="path to directory with ESRGAN models", default=os.path.join(script_path, 'ESRGAN'))
parser.add_argument("--opt-split-attention", action='store_true', help="does not do anything")
parser.add_argument("--disable-opt-split-attention", action='store_true', help="disable an optimization that reduces vram usage by a lot")
parser.add_argument("--opt-split-attention-v1", action='store_true', help="enable older version of split attention optimization that does not consaumes all the VRAM it can find")
parser.add_argument("--listen", action='store_true', help="launch gradio with 0.0.0.0 as server name, allowing to respond to network requests")
parser.add_argument("--port", type=int, help="launch gradio with given server port, you need root/admin rights for ports < 1024, defaults to 7860 if available", default=None)
parser.add_argument("--show-negative-prompt", action='store_true', help="does not do anything", default=False)
parser.add_argument("--ui-config-file", type=str, help="filename to use for ui configuration", default=os.path.join(script_path, 'ui-config.json'))
parser.add_argument("--hide-ui-dir-config", action='store_true', help="hide directory configuration from webui", default=False)
parser.add_argument("--ui-settings-file", type=str, help="filename to use for ui settings", default=os.path.join(script_path, 'config.json'))
parser.add_argument("--gradio-debug", action='store_true', help="launch gradio with --debug option")
parser.add_argument("--gradio-auth", type=str, help='set gradio authentication like "username:password"; or comma-delimit multiple like "u1:p1,u2:p2,u3:p3"', default=None)
parser.add_argument("--opt-channelslast", action='store_true', help="change memory type for stable diffusion to channels last")
parser.add_argument("--styles-file", type=str, help="filename to use for styles", default=os.path.join(script_path, 'styles.csv'))
cmd_opts = parser.parse_args()
if cmd_opts.opt_split_attention:
print("Information: --opt-split-attention is now the default. To remove this message, remove --opt-split-attention from command line arguments. To disable the optimization, use --disable-opt-split-attention")
device = get_optimal_device()
batch_cond_uncond = cmd_opts.always_batch_cond_uncond or not (cmd_opts.lowvram or cmd_opts.medvram)
parallel_processing_allowed = not cmd_opts.lowvram and not cmd_opts.medvram
config_filename = cmd_opts.ui_settings_file
class State:
interrupted = False
job = ""
job_no = 0
job_count = 0
sampling_step = 0
sampling_steps = 0
current_latent = None
current_image = None
current_image_sampling_step = 0
def interrupt(self):
self.interrupted = True
def nextjob(self):
self.job_no += 1
self.sampling_step = 0
self.current_image_sampling_step = 0
state = State()
artist_db = modules.artists.ArtistsDatabase(os.path.join(script_path, 'artists.csv'))
styles_filename = cmd_opts.styles_file
prompt_styles = modules.styles.StyleDatabase(styles_filename)
interrogator = modules.interrogate.InterrogateModels("interrogate")
face_restorers = []
modules.sd_models.list_models()
class Options:
class OptionInfo:
def __init__(self, default=None, label="", component=None, component_args=None, onchange=None):
self.default = default
self.label = label
self.component = component
self.component_args = component_args
self.onchange = onchange
data = None
hide_dirs = {"visible": False} if cmd_opts.hide_ui_dir_config else None
data_labels = {
"samples_filename_pattern": OptionInfo("", "Images filename pattern"),
"save_to_dirs": OptionInfo(False, "Save images to a subdirectory"),
"grid_save_to_dirs": OptionInfo(False, "Save grids to subdirectory"),
"directories_filename_pattern": OptionInfo("", "Directory name pattern"),
"outdir_samples": OptionInfo("", "Output directory for images; if empty, defaults to two directories below", component_args=hide_dirs),
"outdir_txt2img_samples": OptionInfo("outputs/txt2img-images", 'Output directory for txt2img images', component_args=hide_dirs),
"outdir_img2img_samples": OptionInfo("outputs/img2img-images", 'Output directory for img2img images', component_args=hide_dirs),
"outdir_extras_samples": OptionInfo("outputs/extras-images", 'Output directory for images from extras tab', component_args=hide_dirs),
"outdir_grids": OptionInfo("", "Output directory for grids; if empty, defaults to two directories below", component_args=hide_dirs),
"outdir_txt2img_grids": OptionInfo("outputs/txt2img-grids", 'Output directory for txt2img grids', component_args=hide_dirs),
"outdir_img2img_grids": OptionInfo("outputs/img2img-grids", 'Output directory for img2img grids', component_args=hide_dirs),
"outdir_save": OptionInfo("log/images", "Directory for saving images using the Save button", component_args=hide_dirs),
"samples_save": OptionInfo(True, "Always save all generated images"),
"save_selected_only": OptionInfo(False, "When using 'Save' button, only save a single selected image"),
"samples_format": OptionInfo('png', 'File format for individual samples'),
"filter_nsfw": OptionInfo(False, "Filter NSFW content"),
"grid_save": OptionInfo(True, "Always save all generated image grids"),
"return_grid": OptionInfo(True, "Show grid in results for web"),
"grid_format": OptionInfo('png', 'File format for grids'),
"grid_extended_filename": OptionInfo(False, "Add extended info (seed, prompt) to filename when saving grid"),
"grid_only_if_multiple": OptionInfo(True, "Do not save grids consisting of one picture"),
"n_rows": OptionInfo(-1, "Grid row count; use -1 for autodetect and 0 for it to be same as batch size", gr.Slider, {"minimum": -1, "maximum": 16, "step": 1}),
"jpeg_quality": OptionInfo(80, "Quality for saved jpeg images", gr.Slider, {"minimum": 1, "maximum": 100, "step": 1}),
"export_for_4chan": OptionInfo(True, "If PNG image is larger than 4MB or any dimension is larger than 4000, downscale and save copy as JPG"),
"enable_pnginfo": OptionInfo(True, "Save text information about generation parameters as chunks to png files"),
"add_model_hash_to_info": OptionInfo(False, "Add model hash to generation information"),
"img2img_color_correction": OptionInfo(False, "Apply color correction to img2img results to match original colors."),
"img2img_fix_steps": OptionInfo(False, "With img2img, do exactly the amount of steps the slider specifies (normaly you'd do less with less denoising)."),
"enable_quantization": OptionInfo(False, "Enable quantization in K samplers for sharper and cleaner results. This may change existing seeds. Requires restart to apply."),
"font": OptionInfo("", "Font for image grids that have text"),
"enable_emphasis": OptionInfo(True, "Use (text) to make model pay more attention to text and [text] to make it pay less attention"),
"enable_batch_seeds": OptionInfo(True, "Make K-diffusion samplers produce same images in a batch as when making a single image"),
"save_txt": OptionInfo(False, "Create a text file next to every image with generation parameters."),
"ESRGAN_tile": OptionInfo(192, "Tile size for upscaling. 0 = no tiling.", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}),
"ESRGAN_tile_overlap": OptionInfo(8, "Tile overlap, in pixels for upscaling. Low values = visible seam.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}),
"random_artist_categories": OptionInfo([], "Allowed categories for random artists selection when using the Roll button", gr.CheckboxGroup, {"choices": artist_db.categories()}),
"upscale_at_full_resolution_padding": OptionInfo(16, "Inpainting at full resolution: padding, in pixels, for the masked region.", gr.Slider, {"minimum": 0, "maximum": 128, "step": 4}),
"show_progressbar": OptionInfo(True, "Show progressbar"),
"show_progress_every_n_steps": OptionInfo(0, "Show show image creation progress every N sampling steps. Set 0 to disable.", gr.Slider, {"minimum": 0, "maximum": 32, "step": 1}),
"multiple_tqdm": OptionInfo(True, "Add a second progress bar to the console that shows progress for an entire job. Broken in PyCharm console."),
"memmon_poll_rate": OptionInfo(8, "VRAM usage polls per second during generation. Set to 0 to disable.", gr.Slider, {"minimum": 0, "maximum": 40, "step":1}),
"face_restoration_model": OptionInfo(None, "Face restoration model", gr.Radio, lambda: {"choices": [x.name() for x in face_restorers]}),
"code_former_weight": OptionInfo(0.5, "CodeFormer weight parameter; 0 = maximum effect; 1 = minimum effect", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}),
"save_images_before_face_restoration": OptionInfo(False, "Save a copy of image before doing face restoration."),
"face_restoration_unload": OptionInfo(False, "Move face restoration model from VRAM into RAM after processing"),
"interrogate_keep_models_in_memory": OptionInfo(False, "Interrogate: keep models in VRAM"),
"interrogate_use_builtin_artists": OptionInfo(True, "Interrogate: use artists from artists.csv"),
"interrogate_clip_num_beams": OptionInfo(1, "Interrogate: num_beams for BLIP", gr.Slider, {"minimum": 1, "maximum": 16, "step": 1}),
"interrogate_clip_min_length": OptionInfo(24, "Interrogate: minimum description length (excluding artists, etc..)", gr.Slider, {"minimum": 1, "maximum": 128, "step": 1}),
"interrogate_clip_max_length": OptionInfo(48, "Interrogate: maximum description length", gr.Slider, {"minimum": 1, "maximum": 256, "step": 1}),
"interrogate_clip_dict_limit": OptionInfo(1500, "Interrogate: maximum number of lines in text file (0 = No limit)"),
"sd_model_checkpoint": OptionInfo(None, "Stable Diffusion checkpoint", gr.Radio, lambda: {"choices": [x.title for x in modules.sd_models.checkpoints_list.values()]}),
}
def __init__(self):
self.data = {k: v.default for k, v in self.data_labels.items()}
def __setattr__(self, key, value):
if self.data is not None:
if key in self.data:
self.data[key] = value
return super(Options, self).__setattr__(key, value)
def __getattr__(self, item):
if self.data is not None:
if item in self.data:
return self.data[item]
if item in self.data_labels:
return self.data_labels[item].default
return super(Options, self).__getattribute__(item)
def save(self, filename):
with open(filename, "w", encoding="utf8") as file:
json.dump(self.data, file)
def load(self, filename):
with open(filename, "r", encoding="utf8") as file:
self.data = json.load(file)
def onchange(self, key, func):
item = self.data_labels.get(key)
item.onchange = func
opts = Options()
if os.path.exists(config_filename):
opts.load(config_filename)
sd_upscalers = []
sd_model = None
progress_print_out = sys.stdout
class TotalTQDM:
def __init__(self):
self._tqdm = None
def reset(self):
self._tqdm = tqdm.tqdm(
desc="Total progress",
total=state.job_count * state.sampling_steps,
position=1,
file=progress_print_out
)
def update(self):
if not opts.multiple_tqdm:
return
if self._tqdm is None:
self.reset()
self._tqdm.update()
def clear(self):
if self._tqdm is not None:
self._tqdm.close()
self._tqdm = None
total_tqdm = TotalTQDM()
mem_mon = modules.memmon.MemUsageMonitor("MemMon", device, opts)
mem_mon.start()
<|code_end|>
webui.py
<|code_start|>import os
import threading
from modules.paths import script_path
import signal
from modules.shared import opts, cmd_opts, state
import modules.shared as shared
import modules.ui
import modules.scripts
import modules.sd_hijack
import modules.codeformer_model
import modules.gfpgan_model
import modules.face_restoration
import modules.realesrgan_model as realesrgan
import modules.esrgan_model as esrgan
import modules.extras
import modules.lowvram
import modules.txt2img
import modules.img2img
import modules.sd_models
modules.codeformer_model.setup_codeformer()
modules.gfpgan_model.setup_gfpgan()
shared.face_restorers.append(modules.face_restoration.FaceRestoration())
esrgan.load_models(cmd_opts.esrgan_models_path)
realesrgan.setup_realesrgan()
queue_lock = threading.Lock()
def wrap_queued_call(func):
def f(*args, **kwargs):
with queue_lock:
res = func(*args, **kwargs)
return res
return f
def wrap_gradio_gpu_call(func):
def f(*args, **kwargs):
shared.state.sampling_step = 0
shared.state.job_count = -1
shared.state.job_no = 0
shared.state.current_latent = None
shared.state.current_image = None
shared.state.current_image_sampling_step = 0
with queue_lock:
res = func(*args, **kwargs)
shared.state.job = ""
shared.state.job_count = 0
return res
return modules.ui.wrap_gradio_call(f)
modules.scripts.load_scripts(os.path.join(script_path, "scripts"))
shared.sd_model = modules.sd_models.load_model()
shared.opts.onchange("sd_model_checkpoint", wrap_queued_call(lambda: modules.sd_models.reload_model_weights(shared.sd_model)))
def webui():
# make the program just exit at ctrl+c without waiting for anything
def sigint_handler(sig, frame):
print(f'Interrupted with signal {sig} in {frame}')
os._exit(0)
signal.signal(signal.SIGINT, sigint_handler)
demo = modules.ui.create_ui(
txt2img=wrap_gradio_gpu_call(modules.txt2img.txt2img),
img2img=wrap_gradio_gpu_call(modules.img2img.img2img),
run_extras=wrap_gradio_gpu_call(modules.extras.run_extras),
run_pnginfo=modules.extras.run_pnginfo
)
demo.launch(
share=cmd_opts.share,
server_name="0.0.0.0" if cmd_opts.listen else None,
server_port=cmd_opts.port,
debug=cmd_opts.gradio_debug,
auth=[tuple(cred.split(':')) for cred in cmd_opts.gradio_auth.strip('"').split(',')] if cmd_opts.gradio_auth else None,
)
if __name__ == "__main__":
webui()
<|code_end|>
|
modules/shared.py
<|code_start|>import sys
import argparse
import json
import os
import gradio as gr
import torch
import tqdm
import modules.artists
from modules.paths import script_path, sd_path
from modules.devices import get_optimal_device
import modules.styles
import modules.interrogate
import modules.memmon
import modules.sd_models
sd_model_file = os.path.join(script_path, 'model.ckpt')
default_sd_model_file = sd_model_file
parser = argparse.ArgumentParser()
parser.add_argument("--config", type=str, default=os.path.join(sd_path, "configs/stable-diffusion/v1-inference.yaml"), help="path to config which constructs model",)
parser.add_argument("--ckpt", type=str, default=sd_model_file, help="path to checkpoint of stable diffusion model; this checkpoint will be added to the list of checkpoints and loaded by default if you don't have a checkpoint selected in settings",)
parser.add_argument("--ckpt-dir", type=str, default=os.path.join(script_path, 'models'), help="path to directory with stable diffusion checkpoints",)
parser.add_argument("--gfpgan-dir", type=str, help="GFPGAN directory", default=('./src/gfpgan' if os.path.exists('./src/gfpgan') else './GFPGAN'))
parser.add_argument("--gfpgan-model", type=str, help="GFPGAN model file name", default='GFPGANv1.3.pth')
parser.add_argument("--no-half", action='store_true', help="do not switch the model to 16-bit floats")
parser.add_argument("--no-progressbar-hiding", action='store_true', help="do not hide progressbar in gradio UI (we hide it because it slows down ML if you have hardware acceleration in browser)")
parser.add_argument("--max-batch-count", type=int, default=16, help="maximum batch count value for the UI")
parser.add_argument("--embeddings-dir", type=str, default=os.path.join(script_path, 'embeddings'), help="embeddings directory for textual inversion (default: embeddings)")
parser.add_argument("--allow-code", action='store_true', help="allow custom script execution from webui")
parser.add_argument("--medvram", action='store_true', help="enable stable diffusion model optimizations for sacrificing a little speed for low VRM usage")
parser.add_argument("--lowvram", action='store_true', help="enable stable diffusion model optimizations for sacrificing a lot of speed for very low VRM usage")
parser.add_argument("--always-batch-cond-uncond", action='store_true', help="disables cond/uncond batching that is enabled to save memory with --medvram or --lowvram")
parser.add_argument("--unload-gfpgan", action='store_true', help="does not do anything.")
parser.add_argument("--precision", type=str, help="evaluate at this precision", choices=["full", "autocast"], default="autocast")
parser.add_argument("--share", action='store_true', help="use share=True for gradio and make the UI accessible through their site (doesn't work for me but you might have better luck)")
parser.add_argument("--esrgan-models-path", type=str, help="path to directory with ESRGAN models", default=os.path.join(script_path, 'ESRGAN'))
parser.add_argument("--opt-split-attention", action='store_true', help="does not do anything")
parser.add_argument("--disable-opt-split-attention", action='store_true', help="disable an optimization that reduces vram usage by a lot")
parser.add_argument("--opt-split-attention-v1", action='store_true', help="enable older version of split attention optimization that does not consaumes all the VRAM it can find")
parser.add_argument("--listen", action='store_true', help="launch gradio with 0.0.0.0 as server name, allowing to respond to network requests")
parser.add_argument("--port", type=int, help="launch gradio with given server port, you need root/admin rights for ports < 1024, defaults to 7860 if available", default=None)
parser.add_argument("--show-negative-prompt", action='store_true', help="does not do anything", default=False)
parser.add_argument("--ui-config-file", type=str, help="filename to use for ui configuration", default=os.path.join(script_path, 'ui-config.json'))
parser.add_argument("--hide-ui-dir-config", action='store_true', help="hide directory configuration from webui", default=False)
parser.add_argument("--ui-settings-file", type=str, help="filename to use for ui settings", default=os.path.join(script_path, 'config.json'))
parser.add_argument("--gradio-debug", action='store_true', help="launch gradio with --debug option")
parser.add_argument("--gradio-auth", type=str, help='set gradio authentication like "username:password"; or comma-delimit multiple like "u1:p1,u2:p2,u3:p3"', default=None)
parser.add_argument("--opt-channelslast", action='store_true', help="change memory type for stable diffusion to channels last")
parser.add_argument("--styles-file", type=str, help="filename to use for styles", default=os.path.join(script_path, 'styles.csv'))
parser.add_argument("--autolaunch", action=argparse.BooleanOptionalAction, help="open the webui URL in the system's default browser upon launch", default=False)
cmd_opts = parser.parse_args()
if cmd_opts.opt_split_attention:
print("Information: --opt-split-attention is now the default. To remove this message, remove --opt-split-attention from command line arguments. To disable the optimization, use --disable-opt-split-attention")
device = get_optimal_device()
batch_cond_uncond = cmd_opts.always_batch_cond_uncond or not (cmd_opts.lowvram or cmd_opts.medvram)
parallel_processing_allowed = not cmd_opts.lowvram and not cmd_opts.medvram
config_filename = cmd_opts.ui_settings_file
class State:
interrupted = False
job = ""
job_no = 0
job_count = 0
sampling_step = 0
sampling_steps = 0
current_latent = None
current_image = None
current_image_sampling_step = 0
def interrupt(self):
self.interrupted = True
def nextjob(self):
self.job_no += 1
self.sampling_step = 0
self.current_image_sampling_step = 0
state = State()
artist_db = modules.artists.ArtistsDatabase(os.path.join(script_path, 'artists.csv'))
styles_filename = cmd_opts.styles_file
prompt_styles = modules.styles.StyleDatabase(styles_filename)
interrogator = modules.interrogate.InterrogateModels("interrogate")
face_restorers = []
modules.sd_models.list_models()
class Options:
class OptionInfo:
def __init__(self, default=None, label="", component=None, component_args=None, onchange=None):
self.default = default
self.label = label
self.component = component
self.component_args = component_args
self.onchange = onchange
data = None
hide_dirs = {"visible": False} if cmd_opts.hide_ui_dir_config else None
data_labels = {
"samples_filename_pattern": OptionInfo("", "Images filename pattern"),
"save_to_dirs": OptionInfo(False, "Save images to a subdirectory"),
"grid_save_to_dirs": OptionInfo(False, "Save grids to subdirectory"),
"directories_filename_pattern": OptionInfo("", "Directory name pattern"),
"outdir_samples": OptionInfo("", "Output directory for images; if empty, defaults to two directories below", component_args=hide_dirs),
"outdir_txt2img_samples": OptionInfo("outputs/txt2img-images", 'Output directory for txt2img images', component_args=hide_dirs),
"outdir_img2img_samples": OptionInfo("outputs/img2img-images", 'Output directory for img2img images', component_args=hide_dirs),
"outdir_extras_samples": OptionInfo("outputs/extras-images", 'Output directory for images from extras tab', component_args=hide_dirs),
"outdir_grids": OptionInfo("", "Output directory for grids; if empty, defaults to two directories below", component_args=hide_dirs),
"outdir_txt2img_grids": OptionInfo("outputs/txt2img-grids", 'Output directory for txt2img grids', component_args=hide_dirs),
"outdir_img2img_grids": OptionInfo("outputs/img2img-grids", 'Output directory for img2img grids', component_args=hide_dirs),
"outdir_save": OptionInfo("log/images", "Directory for saving images using the Save button", component_args=hide_dirs),
"samples_save": OptionInfo(True, "Always save all generated images"),
"save_selected_only": OptionInfo(False, "When using 'Save' button, only save a single selected image"),
"samples_format": OptionInfo('png', 'File format for individual samples'),
"filter_nsfw": OptionInfo(False, "Filter NSFW content"),
"grid_save": OptionInfo(True, "Always save all generated image grids"),
"return_grid": OptionInfo(True, "Show grid in results for web"),
"grid_format": OptionInfo('png', 'File format for grids'),
"grid_extended_filename": OptionInfo(False, "Add extended info (seed, prompt) to filename when saving grid"),
"grid_only_if_multiple": OptionInfo(True, "Do not save grids consisting of one picture"),
"n_rows": OptionInfo(-1, "Grid row count; use -1 for autodetect and 0 for it to be same as batch size", gr.Slider, {"minimum": -1, "maximum": 16, "step": 1}),
"jpeg_quality": OptionInfo(80, "Quality for saved jpeg images", gr.Slider, {"minimum": 1, "maximum": 100, "step": 1}),
"export_for_4chan": OptionInfo(True, "If PNG image is larger than 4MB or any dimension is larger than 4000, downscale and save copy as JPG"),
"enable_pnginfo": OptionInfo(True, "Save text information about generation parameters as chunks to png files"),
"add_model_hash_to_info": OptionInfo(False, "Add model hash to generation information"),
"img2img_color_correction": OptionInfo(False, "Apply color correction to img2img results to match original colors."),
"img2img_fix_steps": OptionInfo(False, "With img2img, do exactly the amount of steps the slider specifies (normaly you'd do less with less denoising)."),
"enable_quantization": OptionInfo(False, "Enable quantization in K samplers for sharper and cleaner results. This may change existing seeds. Requires restart to apply."),
"font": OptionInfo("", "Font for image grids that have text"),
"enable_emphasis": OptionInfo(True, "Use (text) to make model pay more attention to text and [text] to make it pay less attention"),
"enable_batch_seeds": OptionInfo(True, "Make K-diffusion samplers produce same images in a batch as when making a single image"),
"save_txt": OptionInfo(False, "Create a text file next to every image with generation parameters."),
"ESRGAN_tile": OptionInfo(192, "Tile size for upscaling. 0 = no tiling.", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}),
"ESRGAN_tile_overlap": OptionInfo(8, "Tile overlap, in pixels for upscaling. Low values = visible seam.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}),
"random_artist_categories": OptionInfo([], "Allowed categories for random artists selection when using the Roll button", gr.CheckboxGroup, {"choices": artist_db.categories()}),
"upscale_at_full_resolution_padding": OptionInfo(16, "Inpainting at full resolution: padding, in pixels, for the masked region.", gr.Slider, {"minimum": 0, "maximum": 128, "step": 4}),
"show_progressbar": OptionInfo(True, "Show progressbar"),
"show_progress_every_n_steps": OptionInfo(0, "Show show image creation progress every N sampling steps. Set 0 to disable.", gr.Slider, {"minimum": 0, "maximum": 32, "step": 1}),
"multiple_tqdm": OptionInfo(True, "Add a second progress bar to the console that shows progress for an entire job. Broken in PyCharm console."),
"memmon_poll_rate": OptionInfo(8, "VRAM usage polls per second during generation. Set to 0 to disable.", gr.Slider, {"minimum": 0, "maximum": 40, "step":1}),
"face_restoration_model": OptionInfo(None, "Face restoration model", gr.Radio, lambda: {"choices": [x.name() for x in face_restorers]}),
"code_former_weight": OptionInfo(0.5, "CodeFormer weight parameter; 0 = maximum effect; 1 = minimum effect", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}),
"save_images_before_face_restoration": OptionInfo(False, "Save a copy of image before doing face restoration."),
"face_restoration_unload": OptionInfo(False, "Move face restoration model from VRAM into RAM after processing"),
"interrogate_keep_models_in_memory": OptionInfo(False, "Interrogate: keep models in VRAM"),
"interrogate_use_builtin_artists": OptionInfo(True, "Interrogate: use artists from artists.csv"),
"interrogate_clip_num_beams": OptionInfo(1, "Interrogate: num_beams for BLIP", gr.Slider, {"minimum": 1, "maximum": 16, "step": 1}),
"interrogate_clip_min_length": OptionInfo(24, "Interrogate: minimum description length (excluding artists, etc..)", gr.Slider, {"minimum": 1, "maximum": 128, "step": 1}),
"interrogate_clip_max_length": OptionInfo(48, "Interrogate: maximum description length", gr.Slider, {"minimum": 1, "maximum": 256, "step": 1}),
"interrogate_clip_dict_limit": OptionInfo(1500, "Interrogate: maximum number of lines in text file (0 = No limit)"),
"sd_model_checkpoint": OptionInfo(None, "Stable Diffusion checkpoint", gr.Radio, lambda: {"choices": [x.title for x in modules.sd_models.checkpoints_list.values()]}),
}
def __init__(self):
self.data = {k: v.default for k, v in self.data_labels.items()}
def __setattr__(self, key, value):
if self.data is not None:
if key in self.data:
self.data[key] = value
return super(Options, self).__setattr__(key, value)
def __getattr__(self, item):
if self.data is not None:
if item in self.data:
return self.data[item]
if item in self.data_labels:
return self.data_labels[item].default
return super(Options, self).__getattribute__(item)
def save(self, filename):
with open(filename, "w", encoding="utf8") as file:
json.dump(self.data, file)
def load(self, filename):
with open(filename, "r", encoding="utf8") as file:
self.data = json.load(file)
def onchange(self, key, func):
item = self.data_labels.get(key)
item.onchange = func
opts = Options()
if os.path.exists(config_filename):
opts.load(config_filename)
sd_upscalers = []
sd_model = None
progress_print_out = sys.stdout
class TotalTQDM:
def __init__(self):
self._tqdm = None
def reset(self):
self._tqdm = tqdm.tqdm(
desc="Total progress",
total=state.job_count * state.sampling_steps,
position=1,
file=progress_print_out
)
def update(self):
if not opts.multiple_tqdm:
return
if self._tqdm is None:
self.reset()
self._tqdm.update()
def clear(self):
if self._tqdm is not None:
self._tqdm.close()
self._tqdm = None
total_tqdm = TotalTQDM()
mem_mon = modules.memmon.MemUsageMonitor("MemMon", device, opts)
mem_mon.start()
<|code_end|>
webui.py
<|code_start|>import os
import threading
from modules.paths import script_path
import signal
from modules.shared import opts, cmd_opts, state
import modules.shared as shared
import modules.ui
import modules.scripts
import modules.sd_hijack
import modules.codeformer_model
import modules.gfpgan_model
import modules.face_restoration
import modules.realesrgan_model as realesrgan
import modules.esrgan_model as esrgan
import modules.extras
import modules.lowvram
import modules.txt2img
import modules.img2img
import modules.sd_models
modules.codeformer_model.setup_codeformer()
modules.gfpgan_model.setup_gfpgan()
shared.face_restorers.append(modules.face_restoration.FaceRestoration())
esrgan.load_models(cmd_opts.esrgan_models_path)
realesrgan.setup_realesrgan()
queue_lock = threading.Lock()
def wrap_queued_call(func):
def f(*args, **kwargs):
with queue_lock:
res = func(*args, **kwargs)
return res
return f
def wrap_gradio_gpu_call(func):
def f(*args, **kwargs):
shared.state.sampling_step = 0
shared.state.job_count = -1
shared.state.job_no = 0
shared.state.current_latent = None
shared.state.current_image = None
shared.state.current_image_sampling_step = 0
with queue_lock:
res = func(*args, **kwargs)
shared.state.job = ""
shared.state.job_count = 0
return res
return modules.ui.wrap_gradio_call(f)
modules.scripts.load_scripts(os.path.join(script_path, "scripts"))
shared.sd_model = modules.sd_models.load_model()
shared.opts.onchange("sd_model_checkpoint", wrap_queued_call(lambda: modules.sd_models.reload_model_weights(shared.sd_model)))
def webui():
# make the program just exit at ctrl+c without waiting for anything
def sigint_handler(sig, frame):
print(f'Interrupted with signal {sig} in {frame}')
os._exit(0)
signal.signal(signal.SIGINT, sigint_handler)
demo = modules.ui.create_ui(
txt2img=wrap_gradio_gpu_call(modules.txt2img.txt2img),
img2img=wrap_gradio_gpu_call(modules.img2img.img2img),
run_extras=wrap_gradio_gpu_call(modules.extras.run_extras),
run_pnginfo=modules.extras.run_pnginfo
)
demo.launch(
share=cmd_opts.share,
server_name="0.0.0.0" if cmd_opts.listen else None,
server_port=cmd_opts.port,
debug=cmd_opts.gradio_debug,
auth=[tuple(cred.split(':')) for cred in cmd_opts.gradio_auth.strip('"').split(',')] if cmd_opts.gradio_auth else None,
inbrowser=cmd_opts.autolaunch,
)
if __name__ == "__main__":
webui()
<|code_end|>
|
GFPGAN restore faces error
Using GFPGAN to restore faces gives the following error:
Traceback (most recent call last):
File "/home/x/stable-diff/stable-diffusion-webui/modules/ui.py", line 128, in f
res = list(func(*args, **kwargs))
File "/home/x/stable-diff/stable-diffusion-webui/webui.py", line 55, in f
res = func(*args, **kwargs)
File "/home/x/stable-diff/stable-diffusion-webui/modules/txt2img.py", line 39, in txt2img
processed = process_images(p)
File "/home/x/stable-diff/stable-diffusion-webui/modules/processing.py", line 314, in process_images
x_sample = modules.face_restoration.restore_faces(x_sample)
File "/home/x/stable-diff/stable-diffusion-webui/modules/face_restoration.py", line 19, in restore_faces
return face_restorer.restore(np_image)
File "/home/x/stable-diff/stable-diffusion-webui/modules/codeformer_model.py", line 79, in restore
self.face_helper.get_face_landmarks_5(only_center_face=False, resize=640, eye_dist_threshold=5)
File "/home/x/stable-diff/stable-diffusion-webui/repositories/CodeFormer/facelib/utils/face_restoration_helper.py", line 151, in get_face_landmarks_5
bboxes = self.face_det.detect_faces(input_img)
File "/home/x/stable-diff/stable-diffusion-webui/repositories/CodeFormer/facelib/detection/retinaface/retinaface.py", line 231, in detect_faces
keep = py_cpu_nms(bounding_boxes, nms_threshold)
File "/home/x/stable-diff/stable-diffusion-webui/repositories/CodeFormer/facelib/detection/retinaface/retinaface_utils.py", line 41, in py_cpu_nms
keep = torchvision.ops.nms(
File "/home/x/.local/lib/python3.10/site-packages/torchvision/ops/boxes.py", line 40, in nms
_assert_has_ops()
File "/home/x/.local/lib/python3.10/site-packages/torchvision/extension.py", line 33, in _assert_has_ops
raise RuntimeError(
RuntimeError: Couldn't load custom C++ ops. This can happen if your PyTorch and torchvision versions are incompatible, or if you had errors while compiling torchvision from source. For further information on the compatible versions, check https://github.com/pytorch/vision#installation for the compatibility matrix. Please check your PyTorch version with torch.__version__ and your torchvision version with torchvision.__version__ and verify if they are compatible, and if not please reinstall torchvision so that it matches your PyTorch install.
Running: python -c "import torch; import torchvision; print(torch.__version__); print(torchvision.__version__)"
Gives the following results:
1.12.1+cu113
0.13.1+cu102
This is on the latest Arch Linux.
GFPGAN works without issues in this similar tool: https://github.com/cmdr2/stable-diffusion-ui
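The RuntimeError above is the typical symptom of torch and torchvision wheels built against different CUDA toolkits (here cu113 vs cu102), so torchvision's custom C++ ops cannot load. A minimal diagnostic sketch, assuming the "+cuXXX" local-version suffix used by the PyTorch wheel index (the reinstall command is only an example, not part of the original report):
import torch
import torchvision

# The "+cuXXX" suffix identifies the CUDA toolkit each wheel was built against.
torch_cuda = torch.__version__.split("+")[-1]        # e.g. "cu113"
tv_cuda = torchvision.__version__.split("+")[-1]     # e.g. "cu102"
if torch_cuda != tv_cuda:
    print(f"CUDA build mismatch: torch is {torch_cuda}, torchvision is {tv_cuda}. "
          "Reinstall torchvision from the matching index, e.g.: "
          "pip install torchvision==0.13.1+cu113 --extra-index-url https://download.pytorch.org/whl/cu113")
Installing torch and torchvision together from the same cu113 index, as the updated launch.py below does, avoids the mismatch.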
|
launch.py
<|code_start|># this scripts installs necessary requirements and launches main program in webui.py
import subprocess
import os
import sys
import importlib.util
import shlex
dir_repos = "repositories"
dir_tmp = "tmp"
python = sys.executable
git = os.environ.get('GIT', "git")
torch_command = os.environ.get('TORCH_COMMAND', "pip install torch==1.12.1+cu113 --extra-index-url https://download.pytorch.org/whl/cu113")
requirements_file = os.environ.get('REQS_FILE', "requirements_versions.txt")
commandline_args = os.environ.get('COMMANDLINE_ARGS', "")
k_diffusion_package = os.environ.get('K_DIFFUSION_PACKAGE', "git+https://github.com/crowsonkb/k-diffusion.git@1a0703dfb7d24d8806267c3e7ccc4caf67fd1331")
gfpgan_package = os.environ.get('GFPGAN_PACKAGE', "git+https://github.com/TencentARC/GFPGAN.git@8d2447a2d918f8eba5a4a01463fd48e45126a379")
stable_diffusion_commit_hash = os.environ.get('STABLE_DIFFUSION_COMMIT_HASH', "69ae4b35e0a0f6ee1af8bb9a5d0016ccb27e36dc")
taming_transformers_commit_hash = os.environ.get('TAMING_TRANSFORMERS_COMMIT_HASH', "24268930bf1dce879235a7fddd0b2355b84d7ea6")
codeformer_commit_hash = os.environ.get('CODEFORMER_COMMIT_HASH', "c5b4593074ba6214284d6acd5f1719b6c5d739af")
blip_commit_hash = os.environ.get('BLIP_COMMIT_HASH', "48211a1594f1321b00f14c9f7a5b4813144b2fb9")
args = shlex.split(commandline_args)
def extract_arg(args, name):
return [x for x in args if x != name], name in args
args, skip_torch_cuda_test = extract_arg(args, '--skip-torch-cuda-test')
def repo_dir(name):
return os.path.join(dir_repos, name)
def run(command, desc=None, errdesc=None):
if desc is not None:
print(desc)
result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
if result.returncode != 0:
message = f"""{errdesc or 'Error running command'}.
Command: {command}
Error code: {result.returncode}
stdout: {result.stdout.decode(encoding="utf8", errors="ignore") if len(result.stdout)>0 else '<empty>'}
stderr: {result.stderr.decode(encoding="utf8", errors="ignore") if len(result.stderr)>0 else '<empty>'}
"""
raise RuntimeError(message)
return result.stdout.decode(encoding="utf8", errors="ignore")
def run_python(code, desc=None, errdesc=None):
return run(f'"{python}" -c "{code}"', desc, errdesc)
def run_pip(args, desc=None):
return run(f'"{python}" -m pip {args} --prefer-binary', desc=f"Installing {desc}", errdesc=f"Couldn't install {desc}")
def check_run(command):
result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
return result.returncode == 0
def check_run_python(code):
return check_run(f'"{python}" -c "{code}"')
def is_installed(package):
try:
spec = importlib.util.find_spec(package)
except ModuleNotFoundError:
return False
return spec is not None
def git_clone(url, dir, name, commithash=None):
# TODO clone into temporary dir and move if successful
if os.path.exists(dir):
return
run(f'"{git}" clone "{url}" "{dir}"', f"Cloning {name} into {dir}...", f"Couldn't clone {name}")
if commithash is not None:
run(f'"{git}" -C {dir} checkout {commithash}', None, "Couldn't checkout {name}'s hash: {commithash}")
try:
commit = run(f"{git} rev-parse HEAD").strip()
except Exception:
commit = "<none>"
print(f"Python {sys.version}")
print(f"Commit hash: {commit}")
if not is_installed("torch"):
run(f'"{python}" -m {torch_command}', "Installing torch", "Couldn't install torch")
if not skip_torch_cuda_test:
run_python("import torch; assert torch.cuda.is_available(), 'Torch is not able to use GPU; add --skip-torch-cuda-test to COMMANDINE_ARGS variable to disable this check'")
if not is_installed("k_diffusion.sampling"):
run_pip(f"install {k_diffusion_package}", "k-diffusion")
if not is_installed("gfpgan"):
run_pip(f"install {gfpgan_package}", "gfpgan")
os.makedirs(dir_repos, exist_ok=True)
git_clone("https://github.com/CompVis/stable-diffusion.git", repo_dir('stable-diffusion'), "Stable Diffusion", stable_diffusion_commit_hash)
git_clone("https://github.com/CompVis/taming-transformers.git", repo_dir('taming-transformers'), "Taming Transformers", taming_transformers_commit_hash)
git_clone("https://github.com/sczhou/CodeFormer.git", repo_dir('CodeFormer'), "CodeFormer", codeformer_commit_hash)
git_clone("https://github.com/salesforce/BLIP.git", repo_dir('BLIP'), "BLIP", blip_commit_hash)
if not is_installed("lpips"):
run_pip(f"install -r {os.path.join(repo_dir('CodeFormer'), 'requirements.txt')}", "requirements for CodeFormer")
run_pip(f"install -r {requirements_file}", "requirements for Web UI")
sys.argv += args
def start_webui():
print(f"Launching Web UI with arguments: {' '.join(sys.argv[1:])}")
import webui
webui.webui()
if __name__ == "__main__":
start_webui()
<|code_end|>
|
launch.py
<|code_start|># this scripts installs necessary requirements and launches main program in webui.py
import subprocess
import os
import sys
import importlib.util
import shlex
dir_repos = "repositories"
dir_tmp = "tmp"
python = sys.executable
git = os.environ.get('GIT', "git")
torch_command = os.environ.get('TORCH_COMMAND', "pip install torch==1.12.1+cu113 torchvision==0.13.1+cu113 --extra-index-url https://download.pytorch.org/whl/cu113")
requirements_file = os.environ.get('REQS_FILE', "requirements_versions.txt")
commandline_args = os.environ.get('COMMANDLINE_ARGS', "")
k_diffusion_package = os.environ.get('K_DIFFUSION_PACKAGE', "git+https://github.com/crowsonkb/k-diffusion.git@1a0703dfb7d24d8806267c3e7ccc4caf67fd1331")
gfpgan_package = os.environ.get('GFPGAN_PACKAGE', "git+https://github.com/TencentARC/GFPGAN.git@8d2447a2d918f8eba5a4a01463fd48e45126a379")
stable_diffusion_commit_hash = os.environ.get('STABLE_DIFFUSION_COMMIT_HASH', "69ae4b35e0a0f6ee1af8bb9a5d0016ccb27e36dc")
taming_transformers_commit_hash = os.environ.get('TAMING_TRANSFORMERS_COMMIT_HASH', "24268930bf1dce879235a7fddd0b2355b84d7ea6")
codeformer_commit_hash = os.environ.get('CODEFORMER_COMMIT_HASH', "c5b4593074ba6214284d6acd5f1719b6c5d739af")
blip_commit_hash = os.environ.get('BLIP_COMMIT_HASH', "48211a1594f1321b00f14c9f7a5b4813144b2fb9")
def repo_dir(name):
return os.path.join(dir_repos, name)
def run(command, desc=None, errdesc=None):
if desc is not None:
print(desc)
result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
if result.returncode != 0:
message = f"""{errdesc or 'Error running command'}.
Command: {command}
Error code: {result.returncode}
stdout: {result.stdout.decode(encoding="utf8", errors="ignore") if len(result.stdout)>0 else '<empty>'}
stderr: {result.stderr.decode(encoding="utf8", errors="ignore") if len(result.stderr)>0 else '<empty>'}
"""
raise RuntimeError(message)
return result.stdout.decode(encoding="utf8", errors="ignore")
def run_python(code, desc=None, errdesc=None):
return run(f'"{python}" -c "{code}"', desc, errdesc)
def run_pip(args, desc=None):
return run(f'"{python}" -m pip {args} --prefer-binary', desc=f"Installing {desc}", errdesc=f"Couldn't install {desc}")
def check_run(command):
result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
return result.returncode == 0
def check_run_python(code):
return check_run(f'"{python}" -c "{code}"')
def is_installed(package):
try:
spec = importlib.util.find_spec(package)
except ModuleNotFoundError:
return False
return spec is not None
def git_clone(url, dir, name, commithash=None):
# TODO clone into temporary dir and move if successful
if os.path.exists(dir):
return
run(f'"{git}" clone "{url}" "{dir}"', f"Cloning {name} into {dir}...", f"Couldn't clone {name}")
if commithash is not None:
run(f'"{git}" -C {dir} checkout {commithash}', None, "Couldn't checkout {name}'s hash: {commithash}")
try:
commit = run(f"{git} rev-parse HEAD").strip()
except Exception:
commit = "<none>"
print(f"Python {sys.version}")
print(f"Commit hash: {commit}")
if not is_installed("torch") or not is_installed("torchvision"):
run(f'"{python}" -m {torch_command}', "Installing torch and torchvision", "Couldn't install torch")
run_python("import torch; assert torch.cuda.is_available(), 'Torch is not able to use GPU'")
if not is_installed("k_diffusion.sampling"):
run_pip(f"install {k_diffusion_package}", "k-diffusion")
if not is_installed("gfpgan"):
run_pip(f"install {gfpgan_package}", "gfpgan")
os.makedirs(dir_repos, exist_ok=True)
git_clone("https://github.com/CompVis/stable-diffusion.git", repo_dir('stable-diffusion'), "Stable Diffusion", stable_diffusion_commit_hash)
git_clone("https://github.com/CompVis/taming-transformers.git", repo_dir('taming-transformers'), "Taming Transformers", taming_transformers_commit_hash)
git_clone("https://github.com/sczhou/CodeFormer.git", repo_dir('CodeFormer'), "CodeFormer", codeformer_commit_hash)
git_clone("https://github.com/salesforce/BLIP.git", repo_dir('BLIP'), "BLIP", blip_commit_hash)
if not is_installed("lpips"):
run_pip(f"install -r {os.path.join(repo_dir('CodeFormer'), 'requirements.txt')}", "requirements for CodeFormer")
run_pip(f"install -r {requirements_file}", "requirements for Web UI")
sys.argv += shlex.split(commandline_args)
def start_webui():
print(f"Launching Web UI with arguments: {' '.join(sys.argv[1:])}")
import webui
webui.webui()
start_webui()
<|code_end|>
|
Images being cleared on dragover
With 9035afb, dragging over an image triggers a reset of that image. I don't find that logical UI behaviour from a UX point of view: I might accidentally drag something over the browser window and involuntarily lose the current image.
@trufty, would you mind explaining why that change was made?
If it was merely to fix the issue that the info in the PNG Info tab was not updated when replacing the image via paste or drop, I have already prepared a solution for that which does not clear the image on dragover. In that case I'd open a PR that reverts that change and implements the (in my view) better fix for that issue.
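As a rough illustration of that alternative (component names and the callback's output shape are assumptions here, not the prepared PR): the PNG Info image could simply re-run the info extraction on its change event, which fires after a paste or drop, instead of being cleared on dragover.
# Hypothetical wiring inside create_ui(); run_pnginfo is the callback already passed in.
image = gr.Image(label="Source", source="upload", interactive=True, type="pil")
html_info = gr.HTML()
generation_info = gr.Textbox(visible=False)

image.change(
    fn=run_pnginfo,  # assumed to return (html, generation info, html) for these three outputs
    inputs=[image],
    outputs=[html_info, generation_info, html_info],
)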
|
modules/ui.py
<|code_start|>import base64
import html
import io
import json
import math
import mimetypes
import os
import random
import sys
import time
import traceback
import numpy as np
import torch
from PIL import Image
import gradio as gr
import gradio.utils
import gradio.routes
from modules.paths import script_path
from modules.shared import opts, cmd_opts
import modules.shared as shared
from modules.sd_samplers import samplers, samplers_for_img2img
import modules.realesrgan_model as realesrgan
import modules.scripts
import modules.gfpgan_model
import modules.codeformer_model
import modules.styles
# this is a fix for Windows users. Without it, javascript files will be served with text/html content-type and the browser will not show any UI
mimetypes.init()
mimetypes.add_type('application/javascript', '.js')
if not cmd_opts.share and not cmd_opts.listen:
# fix gradio phoning home
gradio.utils.version_check = lambda: None
gradio.utils.get_local_ip_address = lambda: '127.0.0.1'
def gr_show(visible=True):
return {"visible": visible, "__type__": "update"}
sample_img2img = "assets/stable-samples/img2img/sketch-mountains-input.jpg"
sample_img2img = sample_img2img if os.path.exists(sample_img2img) else None
css_hide_progressbar = """
.wrap .m-12 svg { display:none!important; }
.wrap .m-12::before { content:"Loading..." }
.progress-bar { display:none!important; }
.meta-text { display:none!important; }
"""
# Using constants for these since the variation selector isn't visible.
# Important that they exactly match script.js for tooltip to work.
random_symbol = '\U0001f3b2\ufe0f' # 🎲️
reuse_symbol = '\u267b\ufe0f' # ♻️
def plaintext_to_html(text):
text = "<p>" + "<br>\n".join([f"{html.escape(x)}" for x in text.split('\n')]) + "</p>"
return text
def image_from_url_text(filedata):
if type(filedata) == list:
if len(filedata) == 0:
return None
filedata = filedata[0]
if filedata.startswith("data:image/png;base64,"):
filedata = filedata[len("data:image/png;base64,"):]
filedata = base64.decodebytes(filedata.encode('utf-8'))
image = Image.open(io.BytesIO(filedata))
return image
def send_gradio_gallery_to_image(x):
if len(x) == 0:
return None
return image_from_url_text(x[0])
def save_files(js_data, images, index):
import csv
os.makedirs(opts.outdir_save, exist_ok=True)
filenames = []
data = json.loads(js_data)
if index > -1 and opts.save_selected_only and (index > 0 or not opts.return_grid): # ensures we are looking at a specific non-grid picture, and we have save_selected_only
images = [images[index]]
data["seed"] += (index - 1 if opts.return_grid else index)
with open(os.path.join(opts.outdir_save, "log.csv"), "a", encoding="utf8", newline='') as file:
at_start = file.tell() == 0
writer = csv.writer(file)
if at_start:
writer.writerow(["prompt", "seed", "width", "height", "sampler", "cfgs", "steps", "filename", "negative_prompt"])
filename_base = str(int(time.time() * 1000))
for i, filedata in enumerate(images):
filename = filename_base + ("" if len(images) == 1 else "-" + str(i + 1)) + ".png"
filepath = os.path.join(opts.outdir_save, filename)
if filedata.startswith("data:image/png;base64,"):
filedata = filedata[len("data:image/png;base64,"):]
with open(filepath, "wb") as imgfile:
imgfile.write(base64.decodebytes(filedata.encode('utf-8')))
filenames.append(filename)
writer.writerow([data["prompt"], data["seed"], data["width"], data["height"], data["sampler"], data["cfg_scale"], data["steps"], filenames[0], data["negative_prompt"]])
return '', '', plaintext_to_html(f"Saved: {filenames[0]}")
def wrap_gradio_call(func):
def f(*args, **kwargs):
run_memmon = opts.memmon_poll_rate > 0 and not shared.mem_mon.disabled
if run_memmon:
shared.mem_mon.monitor()
t = time.perf_counter()
try:
res = list(func(*args, **kwargs))
except Exception as e:
print("Error completing request", file=sys.stderr)
print("Arguments:", args, kwargs, file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
shared.state.job = ""
shared.state.job_count = 0
res = [None, '', f"<div class='error'>{plaintext_to_html(type(e).__name__+': '+str(e))}</div>"]
elapsed = time.perf_counter() - t
if run_memmon:
mem_stats = {k: -(v//-(1024*1024)) for k, v in shared.mem_mon.stop().items()}
active_peak = mem_stats['active_peak']
reserved_peak = mem_stats['reserved_peak']
sys_peak = mem_stats['system_peak']
sys_total = mem_stats['total']
sys_pct = round(sys_peak/max(sys_total, 1) * 100, 2)
vram_html = f"<p class='vram'>Torch active/reserved: {active_peak}/{reserved_peak} MiB, <wbr>Sys VRAM: {sys_peak}/{sys_total} MiB ({sys_pct}%)</p>"
else:
vram_html = ''
# last item is always HTML
res[-1] += f"<div class='performance'><p class='time'>Time taken: <wbr>{elapsed:.2f}s</p>{vram_html}</div>"
shared.state.interrupted = False
return tuple(res)
return f
def check_progress_call():
if shared.state.job_count == 0:
return "", gr_show(False), gr_show(False)
progress = 0
if shared.state.job_count > 0:
progress += shared.state.job_no / shared.state.job_count
if shared.state.sampling_steps > 0:
progress += 1 / shared.state.job_count * shared.state.sampling_step / shared.state.sampling_steps
progress = min(progress, 1)
progressbar = ""
if opts.show_progressbar:
progressbar = f"""<div class='progressDiv'><div class='progress' style="width:{progress * 100}%">{str(int(progress*100))+"%" if progress > 0.01 else ""}</div></div>"""
image = gr_show(False)
preview_visibility = gr_show(False)
if opts.show_progress_every_n_steps > 0:
if shared.parallel_processing_allowed:
if shared.state.sampling_step - shared.state.current_image_sampling_step >= opts.show_progress_every_n_steps and shared.state.current_latent is not None:
shared.state.current_image = modules.sd_samplers.sample_to_image(shared.state.current_latent)
shared.state.current_image_sampling_step = shared.state.sampling_step
image = shared.state.current_image
if image is None or progress >= 1:
image = gr.update(value=None)
else:
preview_visibility = gr_show(True)
return f"<span style='display: none'>{time.time()}</span><p>{progressbar}</p>", preview_visibility, image
def check_progress_call_initial():
shared.state.job_count = -1
shared.state.current_latent = None
shared.state.current_image = None
return check_progress_call()
def roll_artist(prompt):
allowed_cats = set([x for x in shared.artist_db.categories() if len(opts.random_artist_categories)==0 or x in opts.random_artist_categories])
artist = random.choice([x for x in shared.artist_db.artists if x.category in allowed_cats])
return prompt + ", " + artist.name if prompt != '' else artist.name
def visit(x, func, path=""):
if hasattr(x, 'children'):
for c in x.children:
visit(c, func, path)
elif x.label is not None:
func(path + "/" + str(x.label), x)
def add_style(name: str, prompt: str, negative_prompt: str):
if name is None:
return [gr_show(), gr_show(), gr_show(), gr_show()]
style = modules.styles.PromptStyle(name, prompt, negative_prompt)
shared.prompt_styles.styles[style.name] = style
# Save all loaded prompt styles: this allows us to update the storage format in the future more easily, because we
# reserialize all styles every time we save them
shared.prompt_styles.save_styles(shared.styles_filename)
update = {"visible": True, "choices": list(shared.prompt_styles.styles), "__type__": "update"}
return [update, update, update, update]
def apply_styles(prompt, prompt_neg, style1_name, style2_name):
prompt = shared.prompt_styles.apply_styles_to_prompt(prompt, [style1_name, style2_name])
prompt_neg = shared.prompt_styles.apply_negative_styles_to_prompt(prompt_neg, [style1_name, style2_name])
return [gr.Textbox.update(value=prompt), gr.Textbox.update(value=prompt_neg), gr.Dropdown.update(value="None"), gr.Dropdown.update(value="None")]
def interrogate(image):
prompt = shared.interrogator.interrogate(image)
return gr_show(True) if prompt is None else prompt
def create_seed_inputs():
with gr.Row():
with gr.Box():
with gr.Row(elem_id='seed_row'):
seed = (gr.Textbox if cmd_opts.use_textbox_seed else gr.Number)(label='Seed', value=-1)
seed.style(container=False)
random_seed = gr.Button(random_symbol, elem_id='random_seed')
reuse_seed = gr.Button(reuse_symbol, elem_id='reuse_seed')
with gr.Box(elem_id='subseed_show_box'):
seed_checkbox = gr.Checkbox(label='Extra', elem_id='subseed_show', value=False)
# Components to show/hide based on the 'Extra' checkbox
seed_extras = []
with gr.Row(visible=False) as seed_extra_row_1:
seed_extras.append(seed_extra_row_1)
with gr.Box():
with gr.Row(elem_id='subseed_row'):
subseed = gr.Number(label='Variation seed', value=-1)
subseed.style(container=False)
random_subseed = gr.Button(random_symbol, elem_id='random_subseed')
reuse_subseed = gr.Button(reuse_symbol, elem_id='reuse_subseed')
subseed_strength = gr.Slider(label='Variation strength', value=0.0, minimum=0, maximum=1, step=0.01)
with gr.Row(visible=False) as seed_extra_row_2:
seed_extras.append(seed_extra_row_2)
seed_resize_from_w = gr.Slider(minimum=0, maximum=2048, step=64, label="Resize seed from width", value=0)
seed_resize_from_h = gr.Slider(minimum=0, maximum=2048, step=64, label="Resize seed from height", value=0)
random_seed.click(fn=lambda: -1, show_progress=False, inputs=[], outputs=[seed])
random_subseed.click(fn=lambda: -1, show_progress=False, inputs=[], outputs=[subseed])
def change_visibility(show):
return {comp: gr_show(show) for comp in seed_extras}
seed_checkbox.change(change_visibility, show_progress=False, inputs=[seed_checkbox], outputs=seed_extras)
return seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w
def connect_reuse_seed(seed: gr.Number, reuse_seed: gr.Button, generation_info: gr.Textbox, dummy_component, is_subseed):
""" Connects a 'reuse (sub)seed' button's click event so that it copies last used
(sub)seed value from generation info the to the seed field. If copying subseed and subseed strength
was 0, i.e. no variation seed was used, it copies the normal seed value instead."""
def copy_seed(gen_info_string: str, index):
res = -1
try:
gen_info = json.loads(gen_info_string)
index -= gen_info.get('index_of_first_image', 0)
if is_subseed and gen_info.get('subseed_strength', 0) > 0:
all_subseeds = gen_info.get('all_subseeds', [-1])
res = all_subseeds[index if 0 <= index < len(all_subseeds) else 0]
else:
all_seeds = gen_info.get('all_seeds', [-1])
res = all_seeds[index if 0 <= index < len(all_seeds) else 0]
except json.decoder.JSONDecodeError as e:
if gen_info_string != '':
print("Error parsing JSON generation info:", file=sys.stderr)
print(gen_info_string, file=sys.stderr)
return [res, gr_show(False)]
reuse_seed.click(
fn=copy_seed,
_js="(x, y) => [x, selected_gallery_index()]",
show_progress=False,
inputs=[generation_info, dummy_component],
outputs=[seed, dummy_component]
)
def create_toprow(is_img2img):
with gr.Row(elem_id="toprow"):
with gr.Column(scale=4):
with gr.Row():
with gr.Column(scale=8):
with gr.Row():
prompt = gr.Textbox(label="Prompt", elem_id="prompt", show_label=False, placeholder="Prompt", lines=2)
roll = gr.Button('Roll', elem_id="roll", visible=len(shared.artist_db.artists) > 0)
with gr.Column(scale=1, elem_id="style_pos_col"):
prompt_style = gr.Dropdown(label="Style 1", elem_id="style_index", choices=[k for k, v in shared.prompt_styles.styles.items()], value=next(iter(shared.prompt_styles.styles.keys())), visible=len(shared.prompt_styles.styles) > 1)
with gr.Row():
with gr.Column(scale=8):
negative_prompt = gr.Textbox(label="Negative prompt", elem_id="negative_prompt", show_label=False, placeholder="Negative prompt", lines=2)
with gr.Column(scale=1, elem_id="style_neg_col"):
prompt_style2 = gr.Dropdown(label="Style 2", elem_id="style2_index", choices=[k for k, v in shared.prompt_styles.styles.items()], value=next(iter(shared.prompt_styles.styles.keys())), visible=len(shared.prompt_styles.styles) > 1)
with gr.Column(scale=1):
with gr.Row():
submit = gr.Button('Generate', elem_id="generate", variant='primary')
with gr.Row():
if is_img2img:
interrogate = gr.Button('Interrogate', elem_id="interrogate")
else:
interrogate = None
prompt_style_apply = gr.Button('Apply style', elem_id="style_apply")
save_style = gr.Button('Create style', elem_id="style_create")
return prompt, roll, prompt_style, negative_prompt, prompt_style2, submit, interrogate, prompt_style_apply, save_style
def setup_progressbar(progressbar, preview):
check_progress = gr.Button('Check progress', elem_id="check_progress", visible=False)
check_progress.click(
fn=check_progress_call,
show_progress=False,
inputs=[],
outputs=[progressbar, preview, preview],
)
check_progress_initial = gr.Button('Check progress (first)', elem_id="check_progress_initial", visible=False)
check_progress_initial.click(
fn=check_progress_call_initial,
show_progress=False,
inputs=[],
outputs=[progressbar, preview, preview],
)
def create_ui(txt2img, img2img, run_extras, run_pnginfo):
with gr.Blocks(analytics_enabled=False) as txt2img_interface:
txt2img_prompt, roll, txt2img_prompt_style, txt2img_negative_prompt, txt2img_prompt_style2, submit, _, txt2img_prompt_style_apply, txt2img_save_style = create_toprow(is_img2img=False)
dummy_component = gr.Label(visible=False)
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
steps = gr.Slider(minimum=1, maximum=150, step=1, label="Sampling Steps", value=20)
sampler_index = gr.Radio(label='Sampling method', elem_id="txt2img_sampling", choices=[x.name for x in samplers], value=samplers[0].name, type="index")
with gr.Row():
restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1)
tiling = gr.Checkbox(label='Tiling', value=False)
enable_hr = gr.Checkbox(label='Highres. fix', value=False)
with gr.Row(visible=False) as hr_options:
scale_latent = gr.Checkbox(label='Scale latent', value=False)
denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7)
with gr.Row():
batch_count = gr.Slider(minimum=1, maximum=cmd_opts.max_batch_count, step=1, label='Batch count', value=1)
batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1)
cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0)
with gr.Group():
width = gr.Slider(minimum=64, maximum=2048, step=64, label="Width", value=512)
height = gr.Slider(minimum=64, maximum=2048, step=64, label="Height", value=512)
seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w = create_seed_inputs()
with gr.Group():
custom_inputs = modules.scripts.scripts_txt2img.setup_ui(is_img2img=False)
with gr.Column(variant='panel'):
progressbar = gr.HTML(elem_id="progressbar")
with gr.Group():
txt2img_preview = gr.Image(elem_id='txt2img_preview', visible=False)
txt2img_gallery = gr.Gallery(label='Output', elem_id='txt2img_gallery').style(grid=4)
setup_progressbar(progressbar, txt2img_preview)
with gr.Group():
with gr.Row():
save = gr.Button('Save')
send_to_img2img = gr.Button('Send to img2img')
send_to_inpaint = gr.Button('Send to inpaint')
send_to_extras = gr.Button('Send to extras')
interrupt = gr.Button('Interrupt')
with gr.Group():
html_info = gr.HTML()
generation_info = gr.Textbox(visible=False)
connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False)
connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True)
txt2img_args = dict(
fn=txt2img,
_js="submit",
inputs=[
txt2img_prompt,
txt2img_negative_prompt,
txt2img_prompt_style,
txt2img_prompt_style2,
steps,
sampler_index,
restore_faces,
tiling,
batch_count,
batch_size,
cfg_scale,
seed,
subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w,
height,
width,
enable_hr,
scale_latent,
denoising_strength,
] + custom_inputs,
outputs=[
txt2img_gallery,
generation_info,
html_info
],
show_progress=False,
)
txt2img_prompt.submit(**txt2img_args)
submit.click(**txt2img_args)
enable_hr.change(
fn=lambda x: gr_show(x),
inputs=[enable_hr],
outputs=[hr_options],
)
interrupt.click(
fn=lambda: shared.state.interrupt(),
inputs=[],
outputs=[],
)
save.click(
fn=wrap_gradio_call(save_files),
_js="(x, y, z) => [x, y, selected_gallery_index()]",
inputs=[
generation_info,
txt2img_gallery,
html_info,
],
outputs=[
html_info,
html_info,
html_info,
]
)
roll.click(
fn=roll_artist,
inputs=[
txt2img_prompt,
],
outputs=[
txt2img_prompt,
]
)
with gr.Blocks(analytics_enabled=False) as img2img_interface:
img2img_prompt, roll, img2img_prompt_style, img2img_negative_prompt, img2img_prompt_style2, submit, img2img_interrogate, img2img_prompt_style_apply, img2img_save_style = create_toprow(is_img2img=True)
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
with gr.Group():
switch_mode = gr.Radio(label='Mode', elem_id="img2img_mode", choices=['Redraw whole image', 'Inpaint a part of image', 'SD upscale'], value='Redraw whole image', type="index", show_label=False)
init_img = gr.Image(label="Image for img2img", source="upload", interactive=True, type="pil")
init_img_with_mask = gr.Image(label="Image for inpainting with mask", elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool="sketch", visible=False, image_mode="RGBA")
init_mask = gr.Image(label="Mask", source="upload", interactive=True, type="pil", visible=False)
init_img_with_mask_comment = gr.HTML(elem_id="mask_bug_info", value="<small>if the editor shows ERROR, switch to another tab and back, then to another img2img mode above and back</small>", visible=False)
with gr.Row():
resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", show_label=False, choices=["Just resize", "Crop and resize", "Resize and fill"], type="index", value="Just resize")
mask_mode = gr.Radio(label="Mask mode", show_label=False, choices=["Draw mask", "Upload mask"], type="index", value="Draw mask")
steps = gr.Slider(minimum=1, maximum=150, step=1, label="Sampling Steps", value=20)
sampler_index = gr.Radio(label='Sampling method', choices=[x.name for x in samplers_for_img2img], value=samplers_for_img2img[0].name, type="index")
mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4, visible=False)
inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='fill', type="index", visible=False)
with gr.Row():
inpaint_full_res = gr.Checkbox(label='Inpaint at full resolution', value=False, visible=False)
inpainting_mask_invert = gr.Radio(label='Masking mode', choices=['Inpaint masked', 'Inpaint not masked'], value='Inpaint masked', type="index", visible=False)
with gr.Row():
restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1)
tiling = gr.Checkbox(label='Tiling', value=False)
sd_upscale_overlap = gr.Slider(minimum=0, maximum=256, step=16, label='Tile overlap', value=64, visible=False)
with gr.Row():
sd_upscale_upscaler_name = gr.Radio(label='Upscaler', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index", visible=False)
with gr.Row():
batch_count = gr.Slider(minimum=1, maximum=cmd_opts.max_batch_count, step=1, label='Batch count', value=1)
batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1)
with gr.Group():
cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0)
denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75)
with gr.Group():
width = gr.Slider(minimum=64, maximum=2048, step=64, label="Width", value=512)
height = gr.Slider(minimum=64, maximum=2048, step=64, label="Height", value=512)
seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w = create_seed_inputs()
with gr.Group():
custom_inputs = modules.scripts.scripts_img2img.setup_ui(is_img2img=True)
with gr.Column(variant='panel'):
progressbar = gr.HTML(elem_id="progressbar")
with gr.Group():
img2img_preview = gr.Image(elem_id='img2img_preview', visible=False)
img2img_gallery = gr.Gallery(label='Output', elem_id='img2img_gallery').style(grid=4)
setup_progressbar(progressbar, img2img_preview)
with gr.Group():
with gr.Row():
save = gr.Button('Save')
img2img_send_to_img2img = gr.Button('Send to img2img')
img2img_send_to_inpaint = gr.Button('Send to inpaint')
img2img_send_to_extras = gr.Button('Send to extras')
interrupt = gr.Button('Interrupt')
img2img_save_style = gr.Button('Save prompt as style')
with gr.Group():
html_info = gr.HTML()
generation_info = gr.Textbox(visible=False)
connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False)
connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True)
def apply_mode(mode, uploadmask):
is_classic = mode == 0
is_inpaint = mode == 1
is_upscale = mode == 2
return {
init_img: gr_show(not is_inpaint or (is_inpaint and uploadmask == 1)),
init_img_with_mask: gr_show(is_inpaint and uploadmask == 0),
init_img_with_mask_comment: gr_show(is_inpaint and uploadmask == 0),
init_mask: gr_show(is_inpaint and uploadmask == 1),
mask_mode: gr_show(is_inpaint),
mask_blur: gr_show(is_inpaint),
inpainting_fill: gr_show(is_inpaint),
sd_upscale_upscaler_name: gr_show(is_upscale),
sd_upscale_overlap: gr_show(is_upscale),
inpaint_full_res: gr_show(is_inpaint),
inpainting_mask_invert: gr_show(is_inpaint),
img2img_interrogate: gr_show(not is_inpaint),
}
switch_mode.change(
apply_mode,
inputs=[switch_mode, mask_mode],
outputs=[
init_img,
init_img_with_mask,
init_img_with_mask_comment,
init_mask,
mask_mode,
mask_blur,
inpainting_fill,
sd_upscale_upscaler_name,
sd_upscale_overlap,
inpaint_full_res,
inpainting_mask_invert,
img2img_interrogate,
]
)
mask_mode.change(
lambda mode: {
init_img: gr_show(mode == 1),
init_img_with_mask: gr_show(mode == 0),
init_mask: gr_show(mode == 1),
},
inputs=[mask_mode],
outputs=[
init_img,
init_img_with_mask,
init_mask,
],
)
img2img_args = dict(
fn=img2img,
_js="submit",
inputs=[
img2img_prompt,
img2img_negative_prompt,
img2img_prompt_style,
img2img_prompt_style2,
init_img,
init_img_with_mask,
init_mask,
mask_mode,
steps,
sampler_index,
mask_blur,
inpainting_fill,
restore_faces,
tiling,
switch_mode,
batch_count,
batch_size,
cfg_scale,
denoising_strength,
seed,
subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w,
height,
width,
resize_mode,
sd_upscale_upscaler_name,
sd_upscale_overlap,
inpaint_full_res,
inpainting_mask_invert,
] + custom_inputs,
outputs=[
img2img_gallery,
generation_info,
html_info
],
show_progress=False,
)
img2img_prompt.submit(**img2img_args)
submit.click(**img2img_args)
img2img_interrogate.click(
fn=interrogate,
inputs=[init_img],
outputs=[img2img_prompt],
)
interrupt.click(
fn=lambda: shared.state.interrupt(),
inputs=[],
outputs=[],
)
save.click(
fn=wrap_gradio_call(save_files),
_js="(x, y, z) => [x, y, selected_gallery_index()]",
inputs=[
generation_info,
img2img_gallery,
html_info
],
outputs=[
html_info,
html_info,
html_info,
]
)
roll.click(
fn=roll_artist,
inputs=[
img2img_prompt,
],
outputs=[
img2img_prompt,
]
)
prompts = [(txt2img_prompt, txt2img_negative_prompt), (img2img_prompt, img2img_negative_prompt)]
style_dropdowns = [(txt2img_prompt_style, txt2img_prompt_style2), (img2img_prompt_style, img2img_prompt_style2)]
for button, (prompt, negative_prompt) in zip([txt2img_save_style, img2img_save_style], prompts):
button.click(
fn=add_style,
_js="ask_for_style_name",
# Have to pass empty dummy component here, because the JavaScript and Python function have to accept
# the same number of parameters, but we only know the style-name after the JavaScript prompt
inputs=[dummy_component, prompt, negative_prompt],
outputs=[txt2img_prompt_style, img2img_prompt_style, txt2img_prompt_style2, img2img_prompt_style2],
)
for button, (prompt, negative_prompt), (style1, style2) in zip([txt2img_prompt_style_apply, img2img_prompt_style_apply], prompts, style_dropdowns):
button.click(
fn=apply_styles,
inputs=[prompt, negative_prompt, style1, style2],
outputs=[prompt, negative_prompt, style1, style2],
)
with gr.Blocks(analytics_enabled=False) as extras_interface:
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
with gr.Tabs():
with gr.TabItem('Single Image'):
image = gr.Image(label="Source", source="upload", interactive=True, type="pil")
with gr.TabItem('Batch Process'):
image_batch = gr.File(label="Batch Process", file_count="multiple", interactive=True, type="file")
upscaling_resize = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Resize", value=2)
with gr.Group():
extras_upscaler_1 = gr.Radio(label='Upscaler 1', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index")
with gr.Group():
extras_upscaler_2 = gr.Radio(label='Upscaler 2', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index")
extras_upscaler_2_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="Upscaler 2 visibility", value=1)
with gr.Group():
gfpgan_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="GFPGAN visibility", value=0, interactive=modules.gfpgan_model.have_gfpgan)
with gr.Group():
codeformer_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="CodeFormer visibility", value=0, interactive=modules.codeformer_model.have_codeformer)
codeformer_weight = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="CodeFormer weight (0 = maximum effect, 1 = minimum effect)", value=0, interactive=modules.codeformer_model.have_codeformer)
submit = gr.Button('Generate', elem_id="extras_generate", variant='primary')
with gr.Column(variant='panel'):
result_images = gr.Gallery(label="Result")
html_info_x = gr.HTML()
html_info = gr.HTML()
extras_args = dict(
fn=run_extras,
inputs=[
image,
image_batch,
gfpgan_visibility,
codeformer_visibility,
codeformer_weight,
upscaling_resize,
extras_upscaler_1,
extras_upscaler_2,
extras_upscaler_2_visibility,
],
outputs=[
result_images,
html_info_x,
html_info,
]
)
submit.click(**extras_args)
pnginfo_interface = gr.Interface(
wrap_gradio_call(run_pnginfo),
inputs=[
gr.Image(label="Source", source="upload", interactive=True, type="pil"),
],
outputs=[
gr.HTML(),
gr.HTML(),
gr.HTML(),
],
allow_flagging="never",
analytics_enabled=False,
live=True,
)
def create_setting_component(key):
def fun():
return opts.data[key] if key in opts.data else opts.data_labels[key].default
info = opts.data_labels[key]
t = type(info.default)
args = info.component_args() if callable(info.component_args) else info.component_args
if info.component is not None:
comp = info.component
elif t == str:
comp = gr.Textbox
elif t == int:
comp = gr.Number
elif t == bool:
comp = gr.Checkbox
else:
raise Exception(f'bad options item type: {str(t)} for key {key}')
return comp(label=info.label, value=fun, **(args or {}))
components = []
keys = list(opts.data_labels.keys())
settings_cols = 3
items_per_col = math.ceil(len(keys) / settings_cols)
def run_settings(*args):
up = []
for key, value, comp in zip(opts.data_labels.keys(), args, components):
comp_args = opts.data_labels[key].component_args
if comp_args and isinstance(comp_args, dict) and comp_args.get('visible') is False:
continue
oldval = opts.data.get(key, None)
opts.data[key] = value
if oldval != value and opts.data_labels[key].onchange is not None:
opts.data_labels[key].onchange()
up.append(comp.update(value=value))
opts.save(shared.config_filename)
return 'Settings applied.'
with gr.Blocks(analytics_enabled=False) as settings_interface:
settings_submit = gr.Button(value="Apply settings", variant='primary')
result = gr.HTML()
with gr.Row(elem_id="settings").style(equal_height=False):
for colno in range(settings_cols):
with gr.Column(variant='panel'):
for rowno in range(items_per_col):
index = rowno + colno * items_per_col
if index < len(keys):
components.append(create_setting_component(keys[index]))
settings_submit.click(
fn=run_settings,
inputs=components,
outputs=[result]
)
request_notifications = gr.Button(value='Request browser notifications')
request_notifications.click(
fn=lambda: None,
inputs=[],
outputs=[],
_js='() => Notification.requestPermission()'
)
interfaces = [
(txt2img_interface, "txt2img", "txt2img"),
(img2img_interface, "img2img", "img2img"),
(extras_interface, "Extras", "extras"),
(pnginfo_interface, "PNG Info", "pnginfo"),
(settings_interface, "Settings", "settings"),
]
with open(os.path.join(script_path, "style.css"), "r", encoding="utf8") as file:
css = file.read()
if os.path.exists(os.path.join(script_path, "user.css")):
with open(os.path.join(script_path, "user.css"), "r", encoding="utf8") as file:
usercss = file.read()
css += usercss
if not cmd_opts.no_progressbar_hiding:
css += css_hide_progressbar
with gr.Blocks(css=css, analytics_enabled=False, title="Stable Diffusion") as demo:
with gr.Tabs() as tabs:
for interface, label, ifid in interfaces:
with gr.TabItem(label, id=ifid):
interface.render()
text_settings = gr.Textbox(elem_id="settings_json", value=lambda: opts.dumpjson(), visible=False)
settings_submit.click(
fn=lambda: opts.dumpjson(),
inputs=[],
outputs=[text_settings],
)
tabs.change(
fn=lambda x: x,
inputs=[init_img_with_mask],
outputs=[init_img_with_mask],
)
send_to_img2img.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_img2img",
inputs=[txt2img_gallery],
outputs=[init_img],
)
send_to_inpaint.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_img2img",
inputs=[txt2img_gallery],
outputs=[init_img_with_mask],
)
img2img_send_to_img2img.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery",
inputs=[img2img_gallery],
outputs=[init_img],
)
img2img_send_to_inpaint.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery",
inputs=[img2img_gallery],
outputs=[init_img_with_mask],
)
send_to_extras.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_extras",
inputs=[txt2img_gallery],
outputs=[image],
)
img2img_send_to_extras.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_extras",
inputs=[img2img_gallery],
outputs=[image],
)
ui_config_file = cmd_opts.ui_config_file
ui_settings = {}
settings_count = len(ui_settings)
error_loading = False
try:
if os.path.exists(ui_config_file):
with open(ui_config_file, "r", encoding="utf8") as file:
ui_settings = json.load(file)
except Exception:
error_loading = True
print("Error loading settings:", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
def loadsave(path, x):
def apply_field(obj, field, condition=None):
key = path + "/" + field
saved_value = ui_settings.get(key, None)
if saved_value is None:
ui_settings[key] = getattr(obj, field)
elif condition is None or condition(saved_value):
setattr(obj, field, saved_value)
if type(x) == gr.Slider:
apply_field(x, 'value')
apply_field(x, 'minimum')
apply_field(x, 'maximum')
apply_field(x, 'step')
if type(x) == gr.Radio:
apply_field(x, 'value', lambda val: val in x.choices)
visit(txt2img_interface, loadsave, "txt2img")
visit(img2img_interface, loadsave, "img2img")
visit(extras_interface, loadsave, "extras")
if not error_loading and (not os.path.exists(ui_config_file) or settings_count != len(ui_settings)):
with open(ui_config_file, "w", encoding="utf8") as file:
json.dump(ui_settings, file, indent=4)
return demo
with open(os.path.join(script_path, "script.js"), "r", encoding="utf8") as jsfile:
javascript = f'<script>{jsfile.read()}</script>'
jsdir = os.path.join(script_path, "javascript")
for filename in os.listdir(jsdir):
with open(os.path.join(jsdir, filename), "r", encoding="utf8") as jsfile:
javascript += f"\n<script>{jsfile.read()}</script>"
def template_response(*args, **kwargs):
res = gradio_routes_templates_response(*args, **kwargs)
res.body = res.body.replace(b'</head>', f'{javascript}</head>'.encode("utf8"))
res.init_headers()
return res
gradio_routes_templates_response = gradio.routes.templates.TemplateResponse
gradio.routes.templates.TemplateResponse = template_response
<|code_end|>
|
modules/ui.py
<|code_start|>import base64
import html
import io
import json
import math
import mimetypes
import os
import random
import sys
import time
import traceback
import numpy as np
import torch
from PIL import Image
import gradio as gr
import gradio.utils
import gradio.routes
from modules.paths import script_path
from modules.shared import opts, cmd_opts
import modules.shared as shared
from modules.sd_samplers import samplers, samplers_for_img2img
import modules.realesrgan_model as realesrgan
import modules.scripts
import modules.gfpgan_model
import modules.codeformer_model
import modules.styles
# this is a fix for Windows users. Without it, javascript files will be served with text/html content-type and the browser will not show any UI
mimetypes.init()
mimetypes.add_type('application/javascript', '.js')
if not cmd_opts.share and not cmd_opts.listen:
# fix gradio phoning home
gradio.utils.version_check = lambda: None
gradio.utils.get_local_ip_address = lambda: '127.0.0.1'
def gr_show(visible=True):
return {"visible": visible, "__type__": "update"}
sample_img2img = "assets/stable-samples/img2img/sketch-mountains-input.jpg"
sample_img2img = sample_img2img if os.path.exists(sample_img2img) else None
css_hide_progressbar = """
.wrap .m-12 svg { display:none!important; }
.wrap .m-12::before { content:"Loading..." }
.progress-bar { display:none!important; }
.meta-text { display:none!important; }
"""
# Using constants for these since the variation selector isn't visible.
# Important that they exactly match script.js for tooltip to work.
random_symbol = '\U0001f3b2\ufe0f' # 🎲️
reuse_symbol = '\u267b\ufe0f' # ♻️
def plaintext_to_html(text):
text = "<p>" + "<br>\n".join([f"{html.escape(x)}" for x in text.split('\n')]) + "</p>"
return text
def image_from_url_text(filedata):
if type(filedata) == list:
if len(filedata) == 0:
return None
filedata = filedata[0]
if filedata.startswith("data:image/png;base64,"):
filedata = filedata[len("data:image/png;base64,"):]
filedata = base64.decodebytes(filedata.encode('utf-8'))
image = Image.open(io.BytesIO(filedata))
return image
def send_gradio_gallery_to_image(x):
if len(x) == 0:
return None
return image_from_url_text(x[0])
def save_files(js_data, images, index):
import csv
os.makedirs(opts.outdir_save, exist_ok=True)
filenames = []
data = json.loads(js_data)
if index > -1 and opts.save_selected_only and (index > 0 or not opts.return_grid): # ensures we are looking at a specific non-grid picture, and we have save_selected_only
images = [images[index]]
data["seed"] += (index - 1 if opts.return_grid else index)
with open(os.path.join(opts.outdir_save, "log.csv"), "a", encoding="utf8", newline='') as file:
at_start = file.tell() == 0
writer = csv.writer(file)
if at_start:
writer.writerow(["prompt", "seed", "width", "height", "sampler", "cfgs", "steps", "filename", "negative_prompt"])
filename_base = str(int(time.time() * 1000))
for i, filedata in enumerate(images):
filename = filename_base + ("" if len(images) == 1 else "-" + str(i + 1)) + ".png"
filepath = os.path.join(opts.outdir_save, filename)
if filedata.startswith("data:image/png;base64,"):
filedata = filedata[len("data:image/png;base64,"):]
with open(filepath, "wb") as imgfile:
imgfile.write(base64.decodebytes(filedata.encode('utf-8')))
filenames.append(filename)
writer.writerow([data["prompt"], data["seed"], data["width"], data["height"], data["sampler"], data["cfg_scale"], data["steps"], filenames[0], data["negative_prompt"]])
return '', '', plaintext_to_html(f"Saved: {filenames[0]}")
def wrap_gradio_call(func):
def f(*args, **kwargs):
run_memmon = opts.memmon_poll_rate > 0 and not shared.mem_mon.disabled
if run_memmon:
shared.mem_mon.monitor()
t = time.perf_counter()
try:
res = list(func(*args, **kwargs))
except Exception as e:
print("Error completing request", file=sys.stderr)
print("Arguments:", args, kwargs, file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
shared.state.job = ""
shared.state.job_count = 0
res = [None, '', f"<div class='error'>{plaintext_to_html(type(e).__name__+': '+str(e))}</div>"]
elapsed = time.perf_counter() - t
if run_memmon:
mem_stats = {k: -(v//-(1024*1024)) for k, v in shared.mem_mon.stop().items()}
active_peak = mem_stats['active_peak']
reserved_peak = mem_stats['reserved_peak']
sys_peak = mem_stats['system_peak']
sys_total = mem_stats['total']
sys_pct = round(sys_peak/max(sys_total, 1) * 100, 2)
vram_html = f"<p class='vram'>Torch active/reserved: {active_peak}/{reserved_peak} MiB, <wbr>Sys VRAM: {sys_peak}/{sys_total} MiB ({sys_pct}%)</p>"
else:
vram_html = ''
# last item is always HTML
res[-1] += f"<div class='performance'><p class='time'>Time taken: <wbr>{elapsed:.2f}s</p>{vram_html}</div>"
shared.state.interrupted = False
return tuple(res)
return f
def check_progress_call():
if shared.state.job_count == 0:
return "", gr_show(False), gr_show(False)
progress = 0
if shared.state.job_count > 0:
progress += shared.state.job_no / shared.state.job_count
if shared.state.sampling_steps > 0:
progress += 1 / shared.state.job_count * shared.state.sampling_step / shared.state.sampling_steps
progress = min(progress, 1)
progressbar = ""
if opts.show_progressbar:
progressbar = f"""<div class='progressDiv'><div class='progress' style="width:{progress * 100}%">{str(int(progress*100))+"%" if progress > 0.01 else ""}</div></div>"""
image = gr_show(False)
preview_visibility = gr_show(False)
if opts.show_progress_every_n_steps > 0:
if shared.parallel_processing_allowed:
if shared.state.sampling_step - shared.state.current_image_sampling_step >= opts.show_progress_every_n_steps and shared.state.current_latent is not None:
shared.state.current_image = modules.sd_samplers.sample_to_image(shared.state.current_latent)
shared.state.current_image_sampling_step = shared.state.sampling_step
image = shared.state.current_image
if image is None or progress >= 1:
image = gr.update(value=None)
else:
preview_visibility = gr_show(True)
return f"<span style='display: none'>{time.time()}</span><p>{progressbar}</p>", preview_visibility, image
def check_progress_call_initial():
shared.state.job_count = -1
shared.state.current_latent = None
shared.state.current_image = None
return check_progress_call()
def roll_artist(prompt):
allowed_cats = set([x for x in shared.artist_db.categories() if len(opts.random_artist_categories)==0 or x in opts.random_artist_categories])
artist = random.choice([x for x in shared.artist_db.artists if x.category in allowed_cats])
return prompt + ", " + artist.name if prompt != '' else artist.name
def visit(x, func, path=""):
if hasattr(x, 'children'):
for c in x.children:
visit(c, func, path)
elif x.label is not None:
func(path + "/" + str(x.label), x)
def add_style(name: str, prompt: str, negative_prompt: str):
if name is None:
return [gr_show(), gr_show()]
style = modules.styles.PromptStyle(name, prompt, negative_prompt)
shared.prompt_styles.styles[style.name] = style
# Save all loaded prompt styles: this allows us to update the storage format in the future more easily, because we
# reserialize all styles every time we save them
shared.prompt_styles.save_styles(shared.styles_filename)
update = {"visible": True, "choices": list(shared.prompt_styles.styles), "__type__": "update"}
return [update, update, update, update]
def apply_styles(prompt, prompt_neg, style1_name, style2_name):
prompt = shared.prompt_styles.apply_styles_to_prompt(prompt, [style1_name, style2_name])
prompt_neg = shared.prompt_styles.apply_negative_styles_to_prompt(prompt_neg, [style1_name, style2_name])
return [gr.Textbox.update(value=prompt), gr.Textbox.update(value=prompt_neg), gr.Dropdown.update(value="None"), gr.Dropdown.update(value="None")]
def interrogate(image):
prompt = shared.interrogator.interrogate(image)
return gr_show(True) if prompt is None else prompt
def create_seed_inputs():
with gr.Row():
with gr.Box():
with gr.Row(elem_id='seed_row'):
seed = (gr.Textbox if cmd_opts.use_textbox_seed else gr.Number)(label='Seed', value=-1)
seed.style(container=False)
random_seed = gr.Button(random_symbol, elem_id='random_seed')
reuse_seed = gr.Button(reuse_symbol, elem_id='reuse_seed')
with gr.Box(elem_id='subseed_show_box'):
seed_checkbox = gr.Checkbox(label='Extra', elem_id='subseed_show', value=False)
# Components to show/hide based on the 'Extra' checkbox
seed_extras = []
with gr.Row(visible=False) as seed_extra_row_1:
seed_extras.append(seed_extra_row_1)
with gr.Box():
with gr.Row(elem_id='subseed_row'):
subseed = gr.Number(label='Variation seed', value=-1)
subseed.style(container=False)
random_subseed = gr.Button(random_symbol, elem_id='random_subseed')
reuse_subseed = gr.Button(reuse_symbol, elem_id='reuse_subseed')
subseed_strength = gr.Slider(label='Variation strength', value=0.0, minimum=0, maximum=1, step=0.01)
with gr.Row(visible=False) as seed_extra_row_2:
seed_extras.append(seed_extra_row_2)
seed_resize_from_w = gr.Slider(minimum=0, maximum=2048, step=64, label="Resize seed from width", value=0)
seed_resize_from_h = gr.Slider(minimum=0, maximum=2048, step=64, label="Resize seed from height", value=0)
random_seed.click(fn=lambda: -1, show_progress=False, inputs=[], outputs=[seed])
random_subseed.click(fn=lambda: -1, show_progress=False, inputs=[], outputs=[subseed])
def change_visibility(show):
return {comp: gr_show(show) for comp in seed_extras}
seed_checkbox.change(change_visibility, show_progress=False, inputs=[seed_checkbox], outputs=seed_extras)
return seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w
def connect_reuse_seed(seed: gr.Number, reuse_seed: gr.Button, generation_info: gr.Textbox, dummy_component, is_subseed):
""" Connects a 'reuse (sub)seed' button's click event so that it copies last used
(sub)seed value from generation info to the seed field. If copying subseed and subseed strength
was 0, i.e. no variation seed was used, it copies the normal seed value instead."""
def copy_seed(gen_info_string: str, index):
res = -1
try:
gen_info = json.loads(gen_info_string)
index -= gen_info.get('index_of_first_image', 0)
if is_subseed and gen_info.get('subseed_strength', 0) > 0:
all_subseeds = gen_info.get('all_subseeds', [-1])
res = all_subseeds[index if 0 <= index < len(all_subseeds) else 0]
else:
all_seeds = gen_info.get('all_seeds', [-1])
res = all_seeds[index if 0 <= index < len(all_seeds) else 0]
except json.decoder.JSONDecodeError as e:
if gen_info_string != '':
print("Error parsing JSON generation info:", file=sys.stderr)
print(gen_info_string, file=sys.stderr)
return [res, gr_show(False)]
reuse_seed.click(
fn=copy_seed,
_js="(x, y) => [x, selected_gallery_index()]",
show_progress=False,
inputs=[generation_info, dummy_component],
outputs=[seed, dummy_component]
)
def create_toprow(is_img2img):
with gr.Row(elem_id="toprow"):
with gr.Column(scale=4):
with gr.Row():
with gr.Column(scale=8):
with gr.Row():
prompt = gr.Textbox(label="Prompt", elem_id="prompt", show_label=False, placeholder="Prompt", lines=2)
roll = gr.Button('Roll', elem_id="roll", visible=len(shared.artist_db.artists) > 0)
with gr.Column(scale=1, elem_id="style_pos_col"):
prompt_style = gr.Dropdown(label="Style 1", elem_id="style_index", choices=[k for k, v in shared.prompt_styles.styles.items()], value=next(iter(shared.prompt_styles.styles.keys())), visible=len(shared.prompt_styles.styles) > 1)
with gr.Row():
with gr.Column(scale=8):
negative_prompt = gr.Textbox(label="Negative prompt", elem_id="negative_prompt", show_label=False, placeholder="Negative prompt", lines=2)
with gr.Column(scale=1, elem_id="style_neg_col"):
prompt_style2 = gr.Dropdown(label="Style 2", elem_id="style2_index", choices=[k for k, v in shared.prompt_styles.styles.items()], value=next(iter(shared.prompt_styles.styles.keys())), visible=len(shared.prompt_styles.styles) > 1)
with gr.Column(scale=1):
with gr.Row():
submit = gr.Button('Generate', elem_id="generate", variant='primary')
with gr.Row():
if is_img2img:
interrogate = gr.Button('Interrogate', elem_id="interrogate")
else:
interrogate = None
prompt_style_apply = gr.Button('Apply style', elem_id="style_apply")
save_style = gr.Button('Create style', elem_id="style_create")
return prompt, roll, prompt_style, negative_prompt, prompt_style2, submit, interrogate, prompt_style_apply, save_style
def setup_progressbar(progressbar, preview):
check_progress = gr.Button('Check progress', elem_id="check_progress", visible=False)
check_progress.click(
fn=check_progress_call,
show_progress=False,
inputs=[],
outputs=[progressbar, preview, preview],
)
check_progress_initial = gr.Button('Check progress (first)', elem_id="check_progress_initial", visible=False)
check_progress_initial.click(
fn=check_progress_call_initial,
show_progress=False,
inputs=[],
outputs=[progressbar, preview, preview],
)
def create_ui(txt2img, img2img, run_extras, run_pnginfo):
with gr.Blocks(analytics_enabled=False) as txt2img_interface:
txt2img_prompt, roll, txt2img_prompt_style, txt2img_negative_prompt, txt2img_prompt_style2, submit, _, txt2img_prompt_style_apply, txt2img_save_style = create_toprow(is_img2img=False)
dummy_component = gr.Label(visible=False)
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
steps = gr.Slider(minimum=1, maximum=150, step=1, label="Sampling Steps", value=20)
sampler_index = gr.Radio(label='Sampling method', elem_id="txt2img_sampling", choices=[x.name for x in samplers], value=samplers[0].name, type="index")
with gr.Row():
restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1)
tiling = gr.Checkbox(label='Tiling', value=False)
enable_hr = gr.Checkbox(label='Highres. fix', value=False)
with gr.Row(visible=False) as hr_options:
scale_latent = gr.Checkbox(label='Scale latent', value=False)
denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7)
with gr.Row():
batch_count = gr.Slider(minimum=1, maximum=cmd_opts.max_batch_count, step=1, label='Batch count', value=1)
batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1)
cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0)
with gr.Group():
width = gr.Slider(minimum=64, maximum=2048, step=64, label="Width", value=512)
height = gr.Slider(minimum=64, maximum=2048, step=64, label="Height", value=512)
seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w = create_seed_inputs()
with gr.Group():
custom_inputs = modules.scripts.scripts_txt2img.setup_ui(is_img2img=False)
with gr.Column(variant='panel'):
progressbar = gr.HTML(elem_id="progressbar")
with gr.Group():
txt2img_preview = gr.Image(elem_id='txt2img_preview', visible=False)
txt2img_gallery = gr.Gallery(label='Output', elem_id='txt2img_gallery').style(grid=4)
setup_progressbar(progressbar, txt2img_preview)
with gr.Group():
with gr.Row():
save = gr.Button('Save')
send_to_img2img = gr.Button('Send to img2img')
send_to_inpaint = gr.Button('Send to inpaint')
send_to_extras = gr.Button('Send to extras')
interrupt = gr.Button('Interrupt')
with gr.Group():
html_info = gr.HTML()
generation_info = gr.Textbox(visible=False)
connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False)
connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True)
txt2img_args = dict(
fn=txt2img,
_js="submit",
inputs=[
txt2img_prompt,
txt2img_negative_prompt,
txt2img_prompt_style,
txt2img_prompt_style2,
steps,
sampler_index,
restore_faces,
tiling,
batch_count,
batch_size,
cfg_scale,
seed,
subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w,
height,
width,
enable_hr,
scale_latent,
denoising_strength,
] + custom_inputs,
outputs=[
txt2img_gallery,
generation_info,
html_info
],
show_progress=False,
)
txt2img_prompt.submit(**txt2img_args)
submit.click(**txt2img_args)
enable_hr.change(
fn=lambda x: gr_show(x),
inputs=[enable_hr],
outputs=[hr_options],
)
interrupt.click(
fn=lambda: shared.state.interrupt(),
inputs=[],
outputs=[],
)
save.click(
fn=wrap_gradio_call(save_files),
_js="(x, y, z) => [x, y, selected_gallery_index()]",
inputs=[
generation_info,
txt2img_gallery,
html_info,
],
outputs=[
html_info,
html_info,
html_info,
]
)
roll.click(
fn=roll_artist,
inputs=[
txt2img_prompt,
],
outputs=[
txt2img_prompt,
]
)
with gr.Blocks(analytics_enabled=False) as img2img_interface:
img2img_prompt, roll, img2img_prompt_style, img2img_negative_prompt, img2img_prompt_style2, submit, img2img_interrogate, img2img_prompt_style_apply, img2img_save_style = create_toprow(is_img2img=True)
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
with gr.Group():
switch_mode = gr.Radio(label='Mode', elem_id="img2img_mode", choices=['Redraw whole image', 'Inpaint a part of image', 'SD upscale'], value='Redraw whole image', type="index", show_label=False)
init_img = gr.Image(label="Image for img2img", source="upload", interactive=True, type="pil")
init_img_with_mask = gr.Image(label="Image for inpainting with mask", elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool="sketch", visible=False, image_mode="RGBA")
init_mask = gr.Image(label="Mask", source="upload", interactive=True, type="pil", visible=False)
init_img_with_mask_comment = gr.HTML(elem_id="mask_bug_info", value="<small>if the editor shows ERROR, switch to another tab and back, then to another img2img mode above and back</small>", visible=False)
with gr.Row():
resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", show_label=False, choices=["Just resize", "Crop and resize", "Resize and fill"], type="index", value="Just resize")
mask_mode = gr.Radio(label="Mask mode", show_label=False, choices=["Draw mask", "Upload mask"], type="index", value="Draw mask")
steps = gr.Slider(minimum=1, maximum=150, step=1, label="Sampling Steps", value=20)
sampler_index = gr.Radio(label='Sampling method', choices=[x.name for x in samplers_for_img2img], value=samplers_for_img2img[0].name, type="index")
mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4, visible=False)
inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='fill', type="index", visible=False)
with gr.Row():
inpaint_full_res = gr.Checkbox(label='Inpaint at full resolution', value=False, visible=False)
inpainting_mask_invert = gr.Radio(label='Masking mode', choices=['Inpaint masked', 'Inpaint not masked'], value='Inpaint masked', type="index", visible=False)
with gr.Row():
restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1)
tiling = gr.Checkbox(label='Tiling', value=False)
sd_upscale_overlap = gr.Slider(minimum=0, maximum=256, step=16, label='Tile overlap', value=64, visible=False)
with gr.Row():
sd_upscale_upscaler_name = gr.Radio(label='Upscaler', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index", visible=False)
with gr.Row():
batch_count = gr.Slider(minimum=1, maximum=cmd_opts.max_batch_count, step=1, label='Batch count', value=1)
batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1)
with gr.Group():
cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0)
denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75)
with gr.Group():
width = gr.Slider(minimum=64, maximum=2048, step=64, label="Width", value=512)
height = gr.Slider(minimum=64, maximum=2048, step=64, label="Height", value=512)
seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w = create_seed_inputs()
with gr.Group():
custom_inputs = modules.scripts.scripts_img2img.setup_ui(is_img2img=True)
with gr.Column(variant='panel'):
progressbar = gr.HTML(elem_id="progressbar")
with gr.Group():
img2img_preview = gr.Image(elem_id='img2img_preview', visible=False)
img2img_gallery = gr.Gallery(label='Output', elem_id='img2img_gallery').style(grid=4)
setup_progressbar(progressbar, img2img_preview)
with gr.Group():
with gr.Row():
save = gr.Button('Save')
img2img_send_to_img2img = gr.Button('Send to img2img')
img2img_send_to_inpaint = gr.Button('Send to inpaint')
img2img_send_to_extras = gr.Button('Send to extras')
interrupt = gr.Button('Interrupt')
img2img_save_style = gr.Button('Save prompt as style')
with gr.Group():
html_info = gr.HTML()
generation_info = gr.Textbox(visible=False)
connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False)
connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True)
def apply_mode(mode, uploadmask):
is_classic = mode == 0
is_inpaint = mode == 1
is_upscale = mode == 2
return {
init_img: gr_show(not is_inpaint or (is_inpaint and uploadmask == 1)),
init_img_with_mask: gr_show(is_inpaint and uploadmask == 0),
init_img_with_mask_comment: gr_show(is_inpaint and uploadmask == 0),
init_mask: gr_show(is_inpaint and uploadmask == 1),
mask_mode: gr_show(is_inpaint),
mask_blur: gr_show(is_inpaint),
inpainting_fill: gr_show(is_inpaint),
sd_upscale_upscaler_name: gr_show(is_upscale),
sd_upscale_overlap: gr_show(is_upscale),
inpaint_full_res: gr_show(is_inpaint),
inpainting_mask_invert: gr_show(is_inpaint),
img2img_interrogate: gr_show(not is_inpaint),
}
switch_mode.change(
apply_mode,
inputs=[switch_mode, mask_mode],
outputs=[
init_img,
init_img_with_mask,
init_img_with_mask_comment,
init_mask,
mask_mode,
mask_blur,
inpainting_fill,
sd_upscale_upscaler_name,
sd_upscale_overlap,
inpaint_full_res,
inpainting_mask_invert,
img2img_interrogate,
]
)
mask_mode.change(
lambda mode: {
init_img: gr_show(mode == 1),
init_img_with_mask: gr_show(mode == 0),
init_mask: gr_show(mode == 1),
},
inputs=[mask_mode],
outputs=[
init_img,
init_img_with_mask,
init_mask,
],
)
img2img_args = dict(
fn=img2img,
_js="submit",
inputs=[
img2img_prompt,
img2img_negative_prompt,
img2img_prompt_style,
img2img_prompt_style2,
init_img,
init_img_with_mask,
init_mask,
mask_mode,
steps,
sampler_index,
mask_blur,
inpainting_fill,
restore_faces,
tiling,
switch_mode,
batch_count,
batch_size,
cfg_scale,
denoising_strength,
seed,
subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w,
height,
width,
resize_mode,
sd_upscale_upscaler_name,
sd_upscale_overlap,
inpaint_full_res,
inpainting_mask_invert,
] + custom_inputs,
outputs=[
img2img_gallery,
generation_info,
html_info
],
show_progress=False,
)
img2img_prompt.submit(**img2img_args)
submit.click(**img2img_args)
img2img_interrogate.click(
fn=interrogate,
inputs=[init_img],
outputs=[img2img_prompt],
)
interrupt.click(
fn=lambda: shared.state.interrupt(),
inputs=[],
outputs=[],
)
save.click(
fn=wrap_gradio_call(save_files),
_js="(x, y, z) => [x, y, selected_gallery_index()]",
inputs=[
generation_info,
img2img_gallery,
html_info
],
outputs=[
html_info,
html_info,
html_info,
]
)
roll.click(
fn=roll_artist,
inputs=[
img2img_prompt,
],
outputs=[
img2img_prompt,
]
)
prompts = [(txt2img_prompt, txt2img_negative_prompt), (img2img_prompt, img2img_negative_prompt)]
style_dropdowns = [(txt2img_prompt_style, txt2img_prompt_style2), (img2img_prompt_style, img2img_prompt_style2)]
for button, (prompt, negative_prompt) in zip([txt2img_save_style, img2img_save_style], prompts):
button.click(
fn=add_style,
_js="ask_for_style_name",
# Have to pass empty dummy component here, because the JavaScript and Python function have to accept
# the same number of parameters, but we only know the style-name after the JavaScript prompt
inputs=[dummy_component, prompt, negative_prompt],
outputs=[txt2img_prompt_style, img2img_prompt_style, txt2img_prompt_style2, img2img_prompt_style2],
)
for button, (prompt, negative_prompt), (style1, style2) in zip([txt2img_prompt_style_apply, img2img_prompt_style_apply], prompts, style_dropdowns):
button.click(
fn=apply_styles,
inputs=[prompt, negative_prompt, style1, style2],
outputs=[prompt, negative_prompt, style1, style2],
)
with gr.Blocks(analytics_enabled=False) as extras_interface:
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
with gr.Tabs():
with gr.TabItem('Single Image'):
image = gr.Image(label="Source", source="upload", interactive=True, type="pil")
with gr.TabItem('Batch Process'):
image_batch = gr.File(label="Batch Process", file_count="multiple", interactive=True, type="file")
upscaling_resize = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Resize", value=2)
with gr.Group():
extras_upscaler_1 = gr.Radio(label='Upscaler 1', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index")
with gr.Group():
extras_upscaler_2 = gr.Radio(label='Upscaler 2', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index")
extras_upscaler_2_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="Upscaler 2 visibility", value=1)
with gr.Group():
gfpgan_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="GFPGAN visibility", value=0, interactive=modules.gfpgan_model.have_gfpgan)
with gr.Group():
codeformer_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="CodeFormer visibility", value=0, interactive=modules.codeformer_model.have_codeformer)
codeformer_weight = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="CodeFormer weight (0 = maximum effect, 1 = minimum effect)", value=0, interactive=modules.codeformer_model.have_codeformer)
submit = gr.Button('Generate', elem_id="extras_generate", variant='primary')
with gr.Column(variant='panel'):
result_images = gr.Gallery(label="Result")
html_info_x = gr.HTML()
html_info = gr.HTML()
extras_args = dict(
fn=run_extras,
inputs=[
image,
image_batch,
gfpgan_visibility,
codeformer_visibility,
codeformer_weight,
upscaling_resize,
extras_upscaler_1,
extras_upscaler_2,
extras_upscaler_2_visibility,
],
outputs=[
result_images,
html_info_x,
html_info,
]
)
submit.click(**extras_args)
pnginfo_interface = gr.Interface(
wrap_gradio_call(run_pnginfo),
inputs=[
gr.Image(elem_id="pnginfo_image", label="Source", source="upload", interactive=True, type="pil"),
],
outputs=[
gr.HTML(),
gr.HTML(),
gr.HTML(),
],
allow_flagging="never",
analytics_enabled=False,
live=True,
)
def create_setting_component(key):
def fun():
return opts.data[key] if key in opts.data else opts.data_labels[key].default
info = opts.data_labels[key]
t = type(info.default)
args = info.component_args() if callable(info.component_args) else info.component_args
if info.component is not None:
comp = info.component
elif t == str:
comp = gr.Textbox
elif t == int:
comp = gr.Number
elif t == bool:
comp = gr.Checkbox
else:
raise Exception(f'bad options item type: {str(t)} for key {key}')
return comp(label=info.label, value=fun, **(args or {}))
components = []
keys = list(opts.data_labels.keys())
settings_cols = 3
items_per_col = math.ceil(len(keys) / settings_cols)
def run_settings(*args):
up = []
for key, value, comp in zip(opts.data_labels.keys(), args, components):
comp_args = opts.data_labels[key].component_args
if comp_args and isinstance(comp_args, dict) and comp_args.get('visible') is False:
continue
oldval = opts.data.get(key, None)
opts.data[key] = value
if oldval != value and opts.data_labels[key].onchange is not None:
opts.data_labels[key].onchange()
up.append(comp.update(value=value))
opts.save(shared.config_filename)
return 'Settings applied.'
with gr.Blocks(analytics_enabled=False) as settings_interface:
settings_submit = gr.Button(value="Apply settings", variant='primary')
result = gr.HTML()
with gr.Row(elem_id="settings").style(equal_height=False):
for colno in range(settings_cols):
with gr.Column(variant='panel'):
for rowno in range(items_per_col):
index = rowno + colno * items_per_col
if index < len(keys):
components.append(create_setting_component(keys[index]))
settings_submit.click(
fn=run_settings,
inputs=components,
outputs=[result]
)
request_notifications = gr.Button(value='Request browser notifications')
request_notifications.click(
fn=lambda: None,
inputs=[],
outputs=[],
_js='() => Notification.requestPermission()'
)
interfaces = [
(txt2img_interface, "txt2img", "txt2img"),
(img2img_interface, "img2img", "img2img"),
(extras_interface, "Extras", "extras"),
(pnginfo_interface, "PNG Info", "pnginfo"),
(settings_interface, "Settings", "settings"),
]
with open(os.path.join(script_path, "style.css"), "r", encoding="utf8") as file:
css = file.read()
if os.path.exists(os.path.join(script_path, "user.css")):
with open(os.path.join(script_path, "user.css"), "r", encoding="utf8") as file:
usercss = file.read()
css += usercss
if not cmd_opts.no_progressbar_hiding:
css += css_hide_progressbar
with gr.Blocks(css=css, analytics_enabled=False, title="Stable Diffusion") as demo:
with gr.Tabs() as tabs:
for interface, label, ifid in interfaces:
with gr.TabItem(label, id=ifid):
interface.render()
text_settings = gr.Textbox(elem_id="settings_json", value=lambda: opts.dumpjson(), visible=False)
settings_submit.click(
fn=lambda: opts.dumpjson(),
inputs=[],
outputs=[text_settings],
)
tabs.change(
fn=lambda x: x,
inputs=[init_img_with_mask],
outputs=[init_img_with_mask],
)
send_to_img2img.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_img2img",
inputs=[txt2img_gallery],
outputs=[init_img],
)
send_to_inpaint.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_img2img",
inputs=[txt2img_gallery],
outputs=[init_img_with_mask],
)
img2img_send_to_img2img.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery",
inputs=[img2img_gallery],
outputs=[init_img],
)
img2img_send_to_inpaint.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery",
inputs=[img2img_gallery],
outputs=[init_img_with_mask],
)
send_to_extras.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_extras",
inputs=[txt2img_gallery],
outputs=[image],
)
img2img_send_to_extras.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_extras",
inputs=[img2img_gallery],
outputs=[image],
)
ui_config_file = cmd_opts.ui_config_file
ui_settings = {}
settings_count = len(ui_settings)
error_loading = False
try:
if os.path.exists(ui_config_file):
with open(ui_config_file, "r", encoding="utf8") as file:
ui_settings = json.load(file)
except Exception:
error_loading = True
print("Error loading settings:", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
def loadsave(path, x):
def apply_field(obj, field, condition=None):
key = path + "/" + field
saved_value = ui_settings.get(key, None)
if saved_value is None:
ui_settings[key] = getattr(obj, field)
elif condition is None or condition(saved_value):
setattr(obj, field, saved_value)
if type(x) == gr.Slider:
apply_field(x, 'value')
apply_field(x, 'minimum')
apply_field(x, 'maximum')
apply_field(x, 'step')
if type(x) == gr.Radio:
apply_field(x, 'value', lambda val: val in x.choices)
visit(txt2img_interface, loadsave, "txt2img")
visit(img2img_interface, loadsave, "img2img")
visit(extras_interface, loadsave, "extras")
if not error_loading and (not os.path.exists(ui_config_file) or settings_count != len(ui_settings)):
with open(ui_config_file, "w", encoding="utf8") as file:
json.dump(ui_settings, file, indent=4)
return demo
with open(os.path.join(script_path, "script.js"), "r", encoding="utf8") as jsfile:
javascript = f'<script>{jsfile.read()}</script>'
jsdir = os.path.join(script_path, "javascript")
for filename in os.listdir(jsdir):
with open(os.path.join(jsdir, filename), "r", encoding="utf8") as jsfile:
javascript += f"\n<script>{jsfile.read()}</script>"
def template_response(*args, **kwargs):
res = gradio_routes_templates_response(*args, **kwargs)
res.body = res.body.replace(b'</head>', f'{javascript}</head>'.encode("utf8"))
res.init_headers()
return res
gradio_routes_templates_response = gradio.routes.templates.TemplateResponse
gradio.routes.templates.TemplateResponse = template_response
<|code_end|>
|
Generate button doesn't change to Interrupt button when "show progressbar" is disabled in settings.
**Describe the bug**
With "show progressbar" off in settings, the Generate button doesn't change when generation begins and interrupt doesn't work.
With "show progressbar" on in settings, the Generate button changes to the Interrupt button and works as expected.
**To Reproduce**
Steps to reproduce the behavior:
1. Go to Settings tab
2. Uncheck "show progressbar"
3. Go to txt2img tab.
4. Click Generate button
5. Note that the Generate button doesn't change to the Interrupt button, leaving the user with no way to interrupt generation.
**Desktop (please complete the following information):**
- OS: Windows 10
- Browser: Firefox
- Commit revision a213d3a21c9e37297fdcb2c2b48bd24290a479cf
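For reference, a minimal standalone Gradio sketch (hypothetical; not code from this repository, and the names `state` and `generate` are illustrative only) showing an Interrupt button wired directly to a shared flag, so interruption does not depend on the progressbar being visible or polled:
```python
# Minimal sketch, assuming gradio is installed.
import time
import gradio as gr

state = {"interrupted": False}

def generate(prompt):
    state["interrupted"] = False
    for step in range(50):              # stand-in for the sampling loop
        if state["interrupted"]:
            return f"Interrupted at step {step}"
        time.sleep(0.1)
    return f"Done: {prompt}"

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    result = gr.Textbox(label="Result")
    with gr.Row():
        submit = gr.Button("Generate", variant="primary")
        interrupt = gr.Button("Interrupt")
    submit.click(fn=generate, inputs=[prompt], outputs=[result])
    # flipping the flag takes effect immediately because generate() checks it every step
    interrupt.click(fn=lambda: state.update(interrupted=True), inputs=[], outputs=[])

demo.launch()
```
Because the flag is checked inside the generation loop itself, hiding the progressbar cannot disable interruption.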
|
modules/ui.py
<|code_start|>import base64
import html
import io
import json
import math
import mimetypes
import os
import random
import sys
import time
import traceback
import numpy as np
import torch
from PIL import Image
import gradio as gr
import gradio.utils
import gradio.routes
from modules.paths import script_path
from modules.shared import opts, cmd_opts
import modules.shared as shared
from modules.sd_samplers import samplers, samplers_for_img2img
import modules.ldsr_model
import modules.scripts
import modules.gfpgan_model
import modules.codeformer_model
import modules.styles
# this is a fix for Windows users. Without it, javascript files will be served with text/html content-type and the browser will not show any UI
mimetypes.init()
mimetypes.add_type('application/javascript', '.js')
if not cmd_opts.share and not cmd_opts.listen:
# fix gradio phoning home
gradio.utils.version_check = lambda: None
gradio.utils.get_local_ip_address = lambda: '127.0.0.1'
def gr_show(visible=True):
return {"visible": visible, "__type__": "update"}
sample_img2img = "assets/stable-samples/img2img/sketch-mountains-input.jpg"
sample_img2img = sample_img2img if os.path.exists(sample_img2img) else None
css_hide_progressbar = """
.wrap .m-12 svg { display:none!important; }
.wrap .m-12::before { content:"Loading..." }
.progress-bar { display:none!important; }
.meta-text { display:none!important; }
"""
# Using constants for these since the variation selector isn't visible.
# Important that they exactly match script.js for tooltip to work.
random_symbol = '\U0001f3b2\ufe0f' # 🎲️
reuse_symbol = '\u267b\ufe0f' # ♻️
def plaintext_to_html(text):
text = "<p>" + "<br>\n".join([f"{html.escape(x)}" for x in text.split('\n')]) + "</p>"
return text
def image_from_url_text(filedata):
if type(filedata) == list:
if len(filedata) == 0:
return None
filedata = filedata[0]
if filedata.startswith("data:image/png;base64,"):
filedata = filedata[len("data:image/png;base64,"):]
filedata = base64.decodebytes(filedata.encode('utf-8'))
image = Image.open(io.BytesIO(filedata))
return image
def send_gradio_gallery_to_image(x):
if len(x) == 0:
return None
return image_from_url_text(x[0])
def save_files(js_data, images, index):
import csv
os.makedirs(opts.outdir_save, exist_ok=True)
filenames = []
data = json.loads(js_data)
if index > -1 and opts.save_selected_only and (index > 0 or not opts.return_grid): # ensures we are looking at a specific non-grid picture, and we have save_selected_only
images = [images[index]]
data["seed"] += (index - 1 if opts.return_grid else index)
with open(os.path.join(opts.outdir_save, "log.csv"), "a", encoding="utf8", newline='') as file:
at_start = file.tell() == 0
writer = csv.writer(file)
if at_start:
writer.writerow(["prompt", "seed", "width", "height", "sampler", "cfgs", "steps", "filename", "negative_prompt"])
filename_base = str(int(time.time() * 1000))
for i, filedata in enumerate(images):
filename = filename_base + ("" if len(images) == 1 else "-" + str(i + 1)) + ".png"
filepath = os.path.join(opts.outdir_save, filename)
if filedata.startswith("data:image/png;base64,"):
filedata = filedata[len("data:image/png;base64,"):]
with open(filepath, "wb") as imgfile:
imgfile.write(base64.decodebytes(filedata.encode('utf-8')))
filenames.append(filename)
writer.writerow([data["prompt"], data["seed"], data["width"], data["height"], data["sampler"], data["cfg_scale"], data["steps"], filenames[0], data["negative_prompt"]])
return '', '', plaintext_to_html(f"Saved: {filenames[0]}")
def wrap_gradio_call(func):
def f(*args, **kwargs):
run_memmon = opts.memmon_poll_rate > 0 and not shared.mem_mon.disabled
if run_memmon:
shared.mem_mon.monitor()
t = time.perf_counter()
try:
res = list(func(*args, **kwargs))
except Exception as e:
print("Error completing request", file=sys.stderr)
print("Arguments:", args, kwargs, file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
shared.state.job = ""
shared.state.job_count = 0
res = [None, '', f"<div class='error'>{plaintext_to_html(type(e).__name__+': '+str(e))}</div>"]
elapsed = time.perf_counter() - t
if run_memmon:
mem_stats = {k: -(v//-(1024*1024)) for k, v in shared.mem_mon.stop().items()}
active_peak = mem_stats['active_peak']
reserved_peak = mem_stats['reserved_peak']
sys_peak = mem_stats['system_peak']
sys_total = mem_stats['total']
sys_pct = round(sys_peak/max(sys_total, 1) * 100, 2)
vram_html = f"<p class='vram'>Torch active/reserved: {active_peak}/{reserved_peak} MiB, <wbr>Sys VRAM: {sys_peak}/{sys_total} MiB ({sys_pct}%)</p>"
else:
vram_html = ''
# last item is always HTML
res[-1] += f"<div class='performance'><p class='time'>Time taken: <wbr>{elapsed:.2f}s</p>{vram_html}</div>"
shared.state.interrupted = False
return tuple(res)
return f
def check_progress_call():
if shared.state.job_count == 0:
return "", gr_show(False), gr_show(False)
progress = 0
if shared.state.job_count > 0:
progress += shared.state.job_no / shared.state.job_count
if shared.state.sampling_steps > 0:
progress += 1 / shared.state.job_count * shared.state.sampling_step / shared.state.sampling_steps
progress = min(progress, 1)
progressbar = ""
if opts.show_progressbar:
progressbar = f"""<div class='progressDiv'><div class='progress' style="width:{progress * 100}%">{str(int(progress*100))+"%" if progress > 0.01 else ""}</div></div>"""
image = gr_show(False)
preview_visibility = gr_show(False)
if opts.show_progress_every_n_steps > 0:
if shared.parallel_processing_allowed:
if shared.state.sampling_step - shared.state.current_image_sampling_step >= opts.show_progress_every_n_steps and shared.state.current_latent is not None:
shared.state.current_image = modules.sd_samplers.sample_to_image(shared.state.current_latent)
shared.state.current_image_sampling_step = shared.state.sampling_step
image = shared.state.current_image
if image is None or progress >= 1:
image = gr.update(value=None)
else:
preview_visibility = gr_show(True)
return f"<span style='display: none'>{time.time()}</span><p>{progressbar}</p>", preview_visibility, image
def check_progress_call_initial():
shared.state.job_count = -1
shared.state.current_latent = None
shared.state.current_image = None
return check_progress_call()
def roll_artist(prompt):
allowed_cats = set([x for x in shared.artist_db.categories() if len(opts.random_artist_categories)==0 or x in opts.random_artist_categories])
artist = random.choice([x for x in shared.artist_db.artists if x.category in allowed_cats])
return prompt + ", " + artist.name if prompt != '' else artist.name
def visit(x, func, path=""):
if hasattr(x, 'children'):
for c in x.children:
visit(c, func, path)
elif x.label is not None:
func(path + "/" + str(x.label), x)
def add_style(name: str, prompt: str, negative_prompt: str):
if name is None:
return [gr_show(), gr_show()]
style = modules.styles.PromptStyle(name, prompt, negative_prompt)
shared.prompt_styles.styles[style.name] = style
# Save all loaded prompt styles: this allows us to update the storage format in the future more easily, because we
# reserialize all styles every time we save them
shared.prompt_styles.save_styles(shared.styles_filename)
update = {"visible": True, "choices": list(shared.prompt_styles.styles), "__type__": "update"}
return [update, update, update, update]
def apply_styles(prompt, prompt_neg, style1_name, style2_name):
prompt = shared.prompt_styles.apply_styles_to_prompt(prompt, [style1_name, style2_name])
prompt_neg = shared.prompt_styles.apply_negative_styles_to_prompt(prompt_neg, [style1_name, style2_name])
return [gr.Textbox.update(value=prompt), gr.Textbox.update(value=prompt_neg), gr.Dropdown.update(value="None"), gr.Dropdown.update(value="None")]
def interrogate(image):
prompt = shared.interrogator.interrogate(image)
return gr_show(True) if prompt is None else prompt
def create_seed_inputs():
with gr.Row():
with gr.Box():
with gr.Row(elem_id='seed_row'):
seed = (gr.Textbox if cmd_opts.use_textbox_seed else gr.Number)(label='Seed', value=-1)
seed.style(container=False)
random_seed = gr.Button(random_symbol, elem_id='random_seed')
reuse_seed = gr.Button(reuse_symbol, elem_id='reuse_seed')
with gr.Box(elem_id='subseed_show_box'):
seed_checkbox = gr.Checkbox(label='Extra', elem_id='subseed_show', value=False)
# Components to show/hide based on the 'Extra' checkbox
seed_extras = []
with gr.Row(visible=False) as seed_extra_row_1:
seed_extras.append(seed_extra_row_1)
with gr.Box():
with gr.Row(elem_id='subseed_row'):
subseed = gr.Number(label='Variation seed', value=-1)
subseed.style(container=False)
random_subseed = gr.Button(random_symbol, elem_id='random_subseed')
reuse_subseed = gr.Button(reuse_symbol, elem_id='reuse_subseed')
subseed_strength = gr.Slider(label='Variation strength', value=0.0, minimum=0, maximum=1, step=0.01)
with gr.Row(visible=False) as seed_extra_row_2:
seed_extras.append(seed_extra_row_2)
seed_resize_from_w = gr.Slider(minimum=0, maximum=2048, step=64, label="Resize seed from width", value=0)
seed_resize_from_h = gr.Slider(minimum=0, maximum=2048, step=64, label="Resize seed from height", value=0)
random_seed.click(fn=lambda: -1, show_progress=False, inputs=[], outputs=[seed])
random_subseed.click(fn=lambda: -1, show_progress=False, inputs=[], outputs=[subseed])
def change_visibility(show):
return {comp: gr_show(show) for comp in seed_extras}
seed_checkbox.change(change_visibility, show_progress=False, inputs=[seed_checkbox], outputs=seed_extras)
return seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox
def connect_reuse_seed(seed: gr.Number, reuse_seed: gr.Button, generation_info: gr.Textbox, dummy_component, is_subseed):
""" Connects a 'reuse (sub)seed' button's click event so that it copies last used
(sub)seed value from generation info to the seed field. If copying subseed and subseed strength
was 0, i.e. no variation seed was used, it copies the normal seed value instead."""
def copy_seed(gen_info_string: str, index):
res = -1
try:
gen_info = json.loads(gen_info_string)
index -= gen_info.get('index_of_first_image', 0)
if is_subseed and gen_info.get('subseed_strength', 0) > 0:
all_subseeds = gen_info.get('all_subseeds', [-1])
res = all_subseeds[index if 0 <= index < len(all_subseeds) else 0]
else:
all_seeds = gen_info.get('all_seeds', [-1])
res = all_seeds[index if 0 <= index < len(all_seeds) else 0]
except json.decoder.JSONDecodeError as e:
if gen_info_string != '':
print("Error parsing JSON generation info:", file=sys.stderr)
print(gen_info_string, file=sys.stderr)
return [res, gr_show(False)]
reuse_seed.click(
fn=copy_seed,
_js="(x, y) => [x, selected_gallery_index()]",
show_progress=False,
inputs=[generation_info, dummy_component],
outputs=[seed, dummy_component]
)
def create_toprow(is_img2img):
with gr.Row(elem_id="toprow"):
with gr.Column(scale=4):
with gr.Row():
with gr.Column(scale=8):
with gr.Row():
prompt = gr.Textbox(label="Prompt", elem_id="prompt", show_label=False, placeholder="Prompt", lines=2)
roll = gr.Button('Roll', elem_id="roll", visible=len(shared.artist_db.artists) > 0)
with gr.Column(scale=1, elem_id="style_pos_col"):
prompt_style = gr.Dropdown(label="Style 1", elem_id="style_index", choices=[k for k, v in shared.prompt_styles.styles.items()], value=next(iter(shared.prompt_styles.styles.keys())), visible=len(shared.prompt_styles.styles) > 1)
with gr.Row():
with gr.Column(scale=8):
negative_prompt = gr.Textbox(label="Negative prompt", elem_id="negative_prompt", show_label=False, placeholder="Negative prompt", lines=2)
with gr.Column(scale=1, elem_id="style_neg_col"):
prompt_style2 = gr.Dropdown(label="Style 2", elem_id="style2_index", choices=[k for k, v in shared.prompt_styles.styles.items()], value=next(iter(shared.prompt_styles.styles.keys())), visible=len(shared.prompt_styles.styles) > 1)
with gr.Column(scale=1):
with gr.Row():
interrupt = gr.Button('Interrupt', elem_id="interrupt")
submit = gr.Button('Generate', elem_id="generate", variant='primary')
interrupt.click(
fn=lambda: shared.state.interrupt(),
inputs=[],
outputs=[],
)
with gr.Row():
if is_img2img:
interrogate = gr.Button('Interrogate', elem_id="interrogate")
else:
interrogate = None
prompt_style_apply = gr.Button('Apply style', elem_id="style_apply")
save_style = gr.Button('Create style', elem_id="style_create")
return prompt, roll, prompt_style, negative_prompt, prompt_style2, submit, interrogate, prompt_style_apply, save_style
def setup_progressbar(progressbar, preview):
check_progress = gr.Button('Check progress', elem_id="check_progress", visible=False)
check_progress.click(
fn=check_progress_call,
show_progress=False,
inputs=[],
outputs=[progressbar, preview, preview],
)
check_progress_initial = gr.Button('Check progress (first)', elem_id="check_progress_initial", visible=False)
check_progress_initial.click(
fn=check_progress_call_initial,
show_progress=False,
inputs=[],
outputs=[progressbar, preview, preview],
)
def create_ui(txt2img, img2img, run_extras, run_pnginfo):
with gr.Blocks(analytics_enabled=False) as txt2img_interface:
txt2img_prompt, roll, txt2img_prompt_style, txt2img_negative_prompt, txt2img_prompt_style2, submit, _, txt2img_prompt_style_apply, txt2img_save_style = create_toprow(is_img2img=False)
dummy_component = gr.Label(visible=False)
with gr.Row(elem_id='progressRow'):
with gr.Column(scale=1):
columnEmpty = "Empty"
with gr.Column(scale=1):
progressbar = gr.HTML(elem_id="progressbar")
txt2img_preview = gr.Image(elem_id='txt2img_preview', visible=False)
setup_progressbar(progressbar, txt2img_preview)
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
steps = gr.Slider(minimum=1, maximum=150, step=1, label="Sampling Steps", value=20)
sampler_index = gr.Radio(label='Sampling method', elem_id="txt2img_sampling", choices=[x.name for x in samplers], value=samplers[0].name, type="index")
with gr.Row():
restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1)
tiling = gr.Checkbox(label='Tiling', value=False)
enable_hr = gr.Checkbox(label='Highres. fix', value=False)
with gr.Row(visible=False) as hr_options:
scale_latent = gr.Checkbox(label='Scale latent', value=False)
denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7)
with gr.Row():
batch_count = gr.Slider(minimum=1, maximum=cmd_opts.max_batch_count, step=1, label='Batch count', value=1)
batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1)
cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0)
with gr.Group():
width = gr.Slider(minimum=64, maximum=2048, step=64, label="Width", value=512)
height = gr.Slider(minimum=64, maximum=2048, step=64, label="Height", value=512)
seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs()
with gr.Group():
custom_inputs = modules.scripts.scripts_txt2img.setup_ui(is_img2img=False)
with gr.Column(variant='panel'):
with gr.Group():
txt2img_preview = gr.Image(elem_id='txt2img_preview', visible=False)
txt2img_gallery = gr.Gallery(label='Output', show_label=False, elem_id='txt2img_gallery').style(grid=4)
with gr.Group():
with gr.Row():
save = gr.Button('Save')
send_to_img2img = gr.Button('Send to img2img')
send_to_inpaint = gr.Button('Send to inpaint')
send_to_extras = gr.Button('Send to extras')
with gr.Group():
html_info = gr.HTML()
generation_info = gr.Textbox(visible=False)
connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False)
connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True)
txt2img_args = dict(
fn=txt2img,
_js="submit",
inputs=[
txt2img_prompt,
txt2img_negative_prompt,
txt2img_prompt_style,
txt2img_prompt_style2,
steps,
sampler_index,
restore_faces,
tiling,
batch_count,
batch_size,
cfg_scale,
seed,
subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox,
height,
width,
enable_hr,
scale_latent,
denoising_strength,
] + custom_inputs,
outputs=[
txt2img_gallery,
generation_info,
html_info
],
show_progress=False,
)
txt2img_prompt.submit(**txt2img_args)
submit.click(**txt2img_args)
enable_hr.change(
fn=lambda x: gr_show(x),
inputs=[enable_hr],
outputs=[hr_options],
)
save.click(
fn=wrap_gradio_call(save_files),
_js="(x, y, z) => [x, y, selected_gallery_index()]",
inputs=[
generation_info,
txt2img_gallery,
html_info,
],
outputs=[
html_info,
html_info,
html_info,
]
)
roll.click(
fn=roll_artist,
inputs=[
txt2img_prompt,
],
outputs=[
txt2img_prompt,
]
)
with gr.Blocks(analytics_enabled=False) as img2img_interface:
img2img_prompt, roll, img2img_prompt_style, img2img_negative_prompt, img2img_prompt_style2, submit, img2img_interrogate, img2img_prompt_style_apply, img2img_save_style = create_toprow(is_img2img=True)
with gr.Row(elem_id='progressRow'):
with gr.Column(scale=1):
columnEmpty = "Empty"
with gr.Column(scale=1):
progressbar = gr.HTML(elem_id="progressbar")
img2img_preview = gr.Image(elem_id='img2img_preview', visible=False)
setup_progressbar(progressbar, img2img_preview)
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
with gr.Tabs(elem_id="mode_img2img") as tabs_img2img_mode:
with gr.TabItem('img2img'):
init_img = gr.Image(label="Image for img2img", show_label=False, source="upload", interactive=True, type="pil")
with gr.TabItem('Inpaint'):
init_img_with_mask = gr.Image(label="Image for inpainting with mask", show_label=False, elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool="sketch", image_mode="RGBA")
init_img_with_mask_comment = gr.HTML(elem_id="mask_bug_info", value="<small>if the editor shows ERROR, switch to another tab and back, then to \"Upload mask\" mode above and back</small>")
init_img_inpaint = gr.Image(label="Image for img2img", show_label=False, source="upload", interactive=True, type="pil", visible=False)
init_mask_inpaint = gr.Image(label="Mask", source="upload", interactive=True, type="pil", visible=False)
mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4)
with gr.Row():
mask_mode = gr.Radio(label="Mask mode", show_label=False, choices=["Draw mask", "Upload mask"], type="index", value="Draw mask")
inpainting_mask_invert = gr.Radio(label='Masking mode', show_label=False, choices=['Inpaint masked', 'Inpaint not masked'], value='Inpaint masked', type="index")
inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='fill', type="index")
with gr.Row():
inpaint_full_res = gr.Checkbox(label='Inpaint at full resolution', value=False)
inpaint_full_res_padding = gr.Slider(label='Inpaint at full resolution padding, pixels', minimum=0, maximum=256, step=4, value=32)
with gr.TabItem('Batch img2img'):
gr.HTML("<p class=\"text-gray-500\">Process images in a directory on the same machine where the server is running.</p>")
img2img_batch_input_dir = gr.Textbox(label="Input directory")
img2img_batch_output_dir = gr.Textbox(label="Output directory")
with gr.Row():
resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", show_label=False, choices=["Just resize", "Crop and resize", "Resize and fill"], type="index", value="Just resize")
steps = gr.Slider(minimum=1, maximum=150, step=1, label="Sampling Steps", value=20)
sampler_index = gr.Radio(label='Sampling method', choices=[x.name for x in samplers_for_img2img], value=samplers_for_img2img[0].name, type="index")
with gr.Row():
restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1)
tiling = gr.Checkbox(label='Tiling', value=False)
with gr.Row():
batch_count = gr.Slider(minimum=1, maximum=cmd_opts.max_batch_count, step=1, label='Batch count', value=1)
batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1)
with gr.Group():
cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0)
denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75)
with gr.Group():
width = gr.Slider(minimum=64, maximum=2048, step=64, label="Width", value=512)
height = gr.Slider(minimum=64, maximum=2048, step=64, label="Height", value=512)
seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs()
with gr.Group():
custom_inputs = modules.scripts.scripts_img2img.setup_ui(is_img2img=True)
with gr.Column(variant='panel'):
with gr.Group():
img2img_preview = gr.Image(elem_id='img2img_preview', visible=False)
img2img_gallery = gr.Gallery(label='Output', show_label=False, elem_id='img2img_gallery').style(grid=4)
with gr.Group():
with gr.Row():
save = gr.Button('Save')
img2img_send_to_img2img = gr.Button('Send to img2img')
img2img_send_to_inpaint = gr.Button('Send to inpaint')
img2img_send_to_extras = gr.Button('Send to extras')
img2img_save_style = gr.Button('Save prompt as style')
with gr.Group():
html_info = gr.HTML()
generation_info = gr.Textbox(visible=False)
connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False)
connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True)
mask_mode.change(
lambda mode, img: {
#init_img_with_mask: gr.Image.update(visible=mode == 0, value=img["image"]),
init_img_with_mask: gr_show(mode == 0),
init_img_with_mask_comment: gr_show(mode == 0),
init_img_inpaint: gr_show(mode == 1),
init_mask_inpaint: gr_show(mode == 1),
},
inputs=[mask_mode, init_img_with_mask],
outputs=[
init_img_with_mask,
init_img_with_mask_comment,
init_img_inpaint,
init_mask_inpaint,
],
)
img2img_args = dict(
fn=img2img,
_js="submit_img2img",
inputs=[
dummy_component,
img2img_prompt,
img2img_negative_prompt,
img2img_prompt_style,
img2img_prompt_style2,
init_img,
init_img_with_mask,
init_img_inpaint,
init_mask_inpaint,
mask_mode,
steps,
sampler_index,
mask_blur,
inpainting_fill,
restore_faces,
tiling,
batch_count,
batch_size,
cfg_scale,
denoising_strength,
seed,
subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox,
height,
width,
resize_mode,
inpaint_full_res,
inpaint_full_res_padding,
inpainting_mask_invert,
img2img_batch_input_dir,
img2img_batch_output_dir,
] + custom_inputs,
outputs=[
img2img_gallery,
generation_info,
html_info
],
show_progress=False,
)
img2img_prompt.submit(**img2img_args)
submit.click(**img2img_args)
img2img_interrogate.click(
fn=interrogate,
inputs=[init_img],
outputs=[img2img_prompt],
)
save.click(
fn=wrap_gradio_call(save_files),
_js="(x, y, z) => [x, y, selected_gallery_index()]",
inputs=[
generation_info,
img2img_gallery,
html_info
],
outputs=[
html_info,
html_info,
html_info,
]
)
roll.click(
fn=roll_artist,
inputs=[
img2img_prompt,
],
outputs=[
img2img_prompt,
]
)
prompts = [(txt2img_prompt, txt2img_negative_prompt), (img2img_prompt, img2img_negative_prompt)]
style_dropdowns = [(txt2img_prompt_style, txt2img_prompt_style2), (img2img_prompt_style, img2img_prompt_style2)]
for button, (prompt, negative_prompt) in zip([txt2img_save_style, img2img_save_style], prompts):
button.click(
fn=add_style,
_js="ask_for_style_name",
# Have to pass empty dummy component here, because the JavaScript and Python function have to accept
# the same number of parameters, but we only know the style-name after the JavaScript prompt
inputs=[dummy_component, prompt, negative_prompt],
outputs=[txt2img_prompt_style, img2img_prompt_style, txt2img_prompt_style2, img2img_prompt_style2],
)
for button, (prompt, negative_prompt), (style1, style2) in zip([txt2img_prompt_style_apply, img2img_prompt_style_apply], prompts, style_dropdowns):
button.click(
fn=apply_styles,
inputs=[prompt, negative_prompt, style1, style2],
outputs=[prompt, negative_prompt, style1, style2],
)
with gr.Blocks(analytics_enabled=False) as extras_interface:
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
with gr.Tabs(elem_id="mode_extras"):
with gr.TabItem('Single Image'):
image = gr.Image(label="Source", source="upload", interactive=True, type="pil")
with gr.TabItem('Batch Process'):
image_batch = gr.File(label="Batch Process", file_count="multiple", interactive=True, type="file")
upscaling_resize = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Resize", value=2)
with gr.Group():
extras_upscaler_1 = gr.Radio(label='Upscaler 1', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index")
with gr.Group():
extras_upscaler_2 = gr.Radio(label='Upscaler 2', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index")
extras_upscaler_2_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="Upscaler 2 visibility", value=1)
with gr.Group():
gfpgan_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="GFPGAN visibility", value=0, interactive=modules.gfpgan_model.have_gfpgan)
with gr.Group():
codeformer_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="CodeFormer visibility", value=0, interactive=modules.codeformer_model.have_codeformer)
codeformer_weight = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="CodeFormer weight (0 = maximum effect, 1 = minimum effect)", value=0, interactive=modules.codeformer_model.have_codeformer)
submit = gr.Button('Generate', elem_id="extras_generate", variant='primary')
with gr.Column(variant='panel'):
result_images = gr.Gallery(label="Result", show_label=False)
html_info_x = gr.HTML()
html_info = gr.HTML()
submit.click(
fn=run_extras,
_js="get_extras_tab_index",
inputs=[
dummy_component,
image,
image_batch,
gfpgan_visibility,
codeformer_visibility,
codeformer_weight,
upscaling_resize,
extras_upscaler_1,
extras_upscaler_2,
extras_upscaler_2_visibility,
],
outputs=[
result_images,
html_info_x,
html_info,
]
)
pnginfo_interface = gr.Interface(
wrap_gradio_call(run_pnginfo),
inputs=[
gr.Image(elem_id="pnginfo_image", label="Source", source="upload", interactive=True, type="pil"),
],
outputs=[
gr.HTML(),
gr.HTML(),
gr.HTML(),
],
allow_flagging="never",
analytics_enabled=False,
live=True,
)
def create_setting_component(key):
def fun():
return opts.data[key] if key in opts.data else opts.data_labels[key].default
info = opts.data_labels[key]
t = type(info.default)
args = info.component_args() if callable(info.component_args) else info.component_args
if info.component is not None:
comp = info.component
elif t == str:
comp = gr.Textbox
elif t == int:
comp = gr.Number
elif t == bool:
comp = gr.Checkbox
else:
raise Exception(f'bad options item type: {str(t)} for key {key}')
return comp(label=info.label, value=fun, **(args or {}))
components = []
def run_settings(*args):
up = []
for key, value, comp in zip(opts.data_labels.keys(), args, components):
comp_args = opts.data_labels[key].component_args
if comp_args and isinstance(comp_args, dict) and comp_args.get('visible') is False:
continue
oldval = opts.data.get(key, None)
opts.data[key] = value
if oldval != value and opts.data_labels[key].onchange is not None:
opts.data_labels[key].onchange()
up.append(comp.update(value=value))
opts.save(shared.config_filename)
return 'Settings applied.'
with gr.Blocks(analytics_enabled=False) as settings_interface:
settings_submit = gr.Button(value="Apply settings", variant='primary')
result = gr.HTML()
settings_cols = 3
items_per_col = int(len(opts.data_labels) * 0.9 / settings_cols)
cols_displayed = 0
items_displayed = 0
previous_section = None
column = None
with gr.Row(elem_id="settings").style(equal_height=False):
for i, (k, item) in enumerate(opts.data_labels.items()):
if previous_section != item.section:
if cols_displayed < settings_cols and (items_displayed >= items_per_col or previous_section is None):
if column is not None:
column.__exit__()
column = gr.Column(variant='panel')
column.__enter__()
items_displayed = 0
cols_displayed += 1
previous_section = item.section
gr.HTML(elem_id="settings_header_text_{}".format(item.section[0]), value='<h1 class="gr-button-lg">{}</h1>'.format(item.section[1]))
components.append(create_setting_component(k))
items_displayed += 1
request_notifications = gr.Button(value='Request browser notifications', elem_id="request_notifications")
request_notifications.click(
fn=lambda: None,
inputs=[],
outputs=[],
_js='function(){}'
)
if column is not None:
column.__exit__()
settings_submit.click(
fn=run_settings,
inputs=components,
outputs=[result]
)
interfaces = [
(txt2img_interface, "txt2img", "txt2img"),
(img2img_interface, "img2img", "img2img"),
(extras_interface, "Extras", "extras"),
(pnginfo_interface, "PNG Info", "pnginfo"),
(settings_interface, "Settings", "settings"),
]
with open(os.path.join(script_path, "style.css"), "r", encoding="utf8") as file:
css = file.read()
if os.path.exists(os.path.join(script_path, "user.css")):
with open(os.path.join(script_path, "user.css"), "r", encoding="utf8") as file:
usercss = file.read()
css += usercss
if not cmd_opts.no_progressbar_hiding:
css += css_hide_progressbar
with gr.Blocks(css=css, analytics_enabled=False, title="Stable Diffusion") as demo:
with gr.Tabs() as tabs:
for interface, label, ifid in interfaces:
with gr.TabItem(label, id=ifid):
interface.render()
text_settings = gr.Textbox(elem_id="settings_json", value=lambda: opts.dumpjson(), visible=False)
settings_submit.click(
fn=lambda: opts.dumpjson(),
inputs=[],
outputs=[text_settings],
)
tabs.change(
fn=lambda x: x,
inputs=[init_img_with_mask],
outputs=[init_img_with_mask],
)
tabs_img2img_mode.change(
fn=lambda x: x,
inputs=[init_img_with_mask],
outputs=[init_img_with_mask],
)
send_to_img2img.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_img2img",
inputs=[txt2img_gallery],
outputs=[init_img],
)
send_to_inpaint.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_img2img",
inputs=[txt2img_gallery],
outputs=[init_img_with_mask],
)
img2img_send_to_img2img.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery",
inputs=[img2img_gallery],
outputs=[init_img],
)
img2img_send_to_inpaint.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery",
inputs=[img2img_gallery],
outputs=[init_img_with_mask],
)
send_to_extras.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_extras",
inputs=[txt2img_gallery],
outputs=[image],
)
img2img_send_to_extras.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_extras",
inputs=[img2img_gallery],
outputs=[image],
)
ui_config_file = cmd_opts.ui_config_file
ui_settings = {}
settings_count = len(ui_settings)
error_loading = False
try:
if os.path.exists(ui_config_file):
with open(ui_config_file, "r", encoding="utf8") as file:
ui_settings = json.load(file)
except Exception:
error_loading = True
print("Error loading settings:", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
def loadsave(path, x):
def apply_field(obj, field, condition=None):
key = path + "/" + field
saved_value = ui_settings.get(key, None)
if saved_value is None:
ui_settings[key] = getattr(obj, field)
elif condition is None or condition(saved_value):
setattr(obj, field, saved_value)
if type(x) == gr.Slider:
apply_field(x, 'value')
apply_field(x, 'minimum')
apply_field(x, 'maximum')
apply_field(x, 'step')
if type(x) == gr.Radio:
apply_field(x, 'value', lambda val: val in x.choices)
visit(txt2img_interface, loadsave, "txt2img")
visit(img2img_interface, loadsave, "img2img")
visit(extras_interface, loadsave, "extras")
if not error_loading and (not os.path.exists(ui_config_file) or settings_count != len(ui_settings)):
with open(ui_config_file, "w", encoding="utf8") as file:
json.dump(ui_settings, file, indent=4)
return demo
with open(os.path.join(script_path, "script.js"), "r", encoding="utf8") as jsfile:
javascript = f'<script>{jsfile.read()}</script>'
jsdir = os.path.join(script_path, "javascript")
for filename in os.listdir(jsdir):
with open(os.path.join(jsdir, filename), "r", encoding="utf8") as jsfile:
javascript += f"\n<script>{jsfile.read()}</script>"
def template_response(*args, **kwargs):
res = gradio_routes_templates_response(*args, **kwargs)
res.body = res.body.replace(b'</head>', f'{javascript}</head>'.encode("utf8"))
res.init_headers()
return res
gradio_routes_templates_response = gradio.routes.templates.TemplateResponse
gradio.routes.templates.TemplateResponse = template_response
<|code_end|>
|
modules/ui.py
<|code_start|>import base64
import html
import io
import json
import math
import mimetypes
import os
import random
import sys
import time
import traceback
import numpy as np
import torch
from PIL import Image
import gradio as gr
import gradio.utils
import gradio.routes
from modules.paths import script_path
from modules.shared import opts, cmd_opts
import modules.shared as shared
from modules.sd_samplers import samplers, samplers_for_img2img
import modules.ldsr_model
import modules.scripts
import modules.gfpgan_model
import modules.codeformer_model
import modules.styles
# this is a fix for Windows users. Without it, javascript files will be served with text/html content-type and the browser will not show any UI
mimetypes.init()
mimetypes.add_type('application/javascript', '.js')
if not cmd_opts.share and not cmd_opts.listen:
# fix gradio phoning home
gradio.utils.version_check = lambda: None
gradio.utils.get_local_ip_address = lambda: '127.0.0.1'
def gr_show(visible=True):
return {"visible": visible, "__type__": "update"}
sample_img2img = "assets/stable-samples/img2img/sketch-mountains-input.jpg"
sample_img2img = sample_img2img if os.path.exists(sample_img2img) else None
css_hide_progressbar = """
.wrap .m-12 svg { display:none!important; }
.wrap .m-12::before { content:"Loading..." }
.progress-bar { display:none!important; }
.meta-text { display:none!important; }
"""
# Using constants for these since the variation selector isn't visible.
# Important that they exactly match script.js for tooltip to work.
random_symbol = '\U0001f3b2\ufe0f' # 🎲️
reuse_symbol = '\u267b\ufe0f' # ♻️
def plaintext_to_html(text):
text = "<p>" + "<br>\n".join([f"{html.escape(x)}" for x in text.split('\n')]) + "</p>"
return text
def image_from_url_text(filedata):
if type(filedata) == list:
if len(filedata) == 0:
return None
filedata = filedata[0]
if filedata.startswith("data:image/png;base64,"):
filedata = filedata[len("data:image/png;base64,"):]
filedata = base64.decodebytes(filedata.encode('utf-8'))
image = Image.open(io.BytesIO(filedata))
return image
def send_gradio_gallery_to_image(x):
if len(x) == 0:
return None
return image_from_url_text(x[0])
def save_files(js_data, images, index):
import csv
os.makedirs(opts.outdir_save, exist_ok=True)
filenames = []
data = json.loads(js_data)
if index > -1 and opts.save_selected_only and (index > 0 or not opts.return_grid): # ensures we are looking at a specific non-grid picture, and we have save_selected_only
images = [images[index]]
data["seed"] += (index - 1 if opts.return_grid else index)
with open(os.path.join(opts.outdir_save, "log.csv"), "a", encoding="utf8", newline='') as file:
at_start = file.tell() == 0
writer = csv.writer(file)
if at_start:
writer.writerow(["prompt", "seed", "width", "height", "sampler", "cfgs", "steps", "filename", "negative_prompt"])
filename_base = str(int(time.time() * 1000))
for i, filedata in enumerate(images):
filename = filename_base + ("" if len(images) == 1 else "-" + str(i + 1)) + ".png"
filepath = os.path.join(opts.outdir_save, filename)
if filedata.startswith("data:image/png;base64,"):
filedata = filedata[len("data:image/png;base64,"):]
with open(filepath, "wb") as imgfile:
imgfile.write(base64.decodebytes(filedata.encode('utf-8')))
filenames.append(filename)
writer.writerow([data["prompt"], data["seed"], data["width"], data["height"], data["sampler"], data["cfg_scale"], data["steps"], filenames[0], data["negative_prompt"]])
return '', '', plaintext_to_html(f"Saved: {filenames[0]}")
def wrap_gradio_call(func):
def f(*args, **kwargs):
run_memmon = opts.memmon_poll_rate > 0 and not shared.mem_mon.disabled
if run_memmon:
shared.mem_mon.monitor()
t = time.perf_counter()
try:
res = list(func(*args, **kwargs))
except Exception as e:
print("Error completing request", file=sys.stderr)
print("Arguments:", args, kwargs, file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
shared.state.job = ""
shared.state.job_count = 0
res = [None, '', f"<div class='error'>{plaintext_to_html(type(e).__name__+': '+str(e))}</div>"]
elapsed = time.perf_counter() - t
if run_memmon:
mem_stats = {k: -(v//-(1024*1024)) for k, v in shared.mem_mon.stop().items()}
active_peak = mem_stats['active_peak']
reserved_peak = mem_stats['reserved_peak']
sys_peak = mem_stats['system_peak']
sys_total = mem_stats['total']
sys_pct = round(sys_peak/max(sys_total, 1) * 100, 2)
vram_html = f"<p class='vram'>Torch active/reserved: {active_peak}/{reserved_peak} MiB, <wbr>Sys VRAM: {sys_peak}/{sys_total} MiB ({sys_pct}%)</p>"
else:
vram_html = ''
# last item is always HTML
res[-1] += f"<div class='performance'><p class='time'>Time taken: <wbr>{elapsed:.2f}s</p>{vram_html}</div>"
shared.state.interrupted = False
return tuple(res)
return f
def check_progress_call():
if shared.state.job_count == 0:
return "", gr_show(False), gr_show(False)
progress = 0
if shared.state.job_count > 0:
progress += shared.state.job_no / shared.state.job_count
if shared.state.sampling_steps > 0:
progress += 1 / shared.state.job_count * shared.state.sampling_step / shared.state.sampling_steps
progress = min(progress, 1)
progressbar = ""
if opts.show_progressbar:
progressbar = f"""<div class='progressDiv'><div class='progress' style="width:{progress * 100}%">{str(int(progress*100))+"%" if progress > 0.01 else ""}</div></div>"""
image = gr_show(False)
preview_visibility = gr_show(False)
if opts.show_progress_every_n_steps > 0:
if shared.parallel_processing_allowed:
if shared.state.sampling_step - shared.state.current_image_sampling_step >= opts.show_progress_every_n_steps and shared.state.current_latent is not None:
shared.state.current_image = modules.sd_samplers.sample_to_image(shared.state.current_latent)
shared.state.current_image_sampling_step = shared.state.sampling_step
image = shared.state.current_image
if image is None or progress >= 1:
image = gr.update(value=None)
else:
preview_visibility = gr_show(True)
return f"<span id='progressSpan' style='display: none'>{time.time()}</span><p>{progressbar}</p>", preview_visibility, image
def check_progress_call_initial():
shared.state.job_count = -1
shared.state.current_latent = None
shared.state.current_image = None
return check_progress_call()
def roll_artist(prompt):
allowed_cats = set([x for x in shared.artist_db.categories() if len(opts.random_artist_categories)==0 or x in opts.random_artist_categories])
artist = random.choice([x for x in shared.artist_db.artists if x.category in allowed_cats])
return prompt + ", " + artist.name if prompt != '' else artist.name
def visit(x, func, path=""):
if hasattr(x, 'children'):
for c in x.children:
visit(c, func, path)
elif x.label is not None:
func(path + "/" + str(x.label), x)
def add_style(name: str, prompt: str, negative_prompt: str):
if name is None:
return [gr_show(), gr_show()]
style = modules.styles.PromptStyle(name, prompt, negative_prompt)
shared.prompt_styles.styles[style.name] = style
# Save all loaded prompt styles: this allows us to update the storage format in the future more easily, because we
# reserialize all styles every time we save them
shared.prompt_styles.save_styles(shared.styles_filename)
update = {"visible": True, "choices": list(shared.prompt_styles.styles), "__type__": "update"}
return [update, update, update, update]
def apply_styles(prompt, prompt_neg, style1_name, style2_name):
prompt = shared.prompt_styles.apply_styles_to_prompt(prompt, [style1_name, style2_name])
prompt_neg = shared.prompt_styles.apply_negative_styles_to_prompt(prompt_neg, [style1_name, style2_name])
return [gr.Textbox.update(value=prompt), gr.Textbox.update(value=prompt_neg), gr.Dropdown.update(value="None"), gr.Dropdown.update(value="None")]
def interrogate(image):
prompt = shared.interrogator.interrogate(image)
return gr_show(True) if prompt is None else prompt
def create_seed_inputs():
with gr.Row():
with gr.Box():
with gr.Row(elem_id='seed_row'):
seed = (gr.Textbox if cmd_opts.use_textbox_seed else gr.Number)(label='Seed', value=-1)
seed.style(container=False)
random_seed = gr.Button(random_symbol, elem_id='random_seed')
reuse_seed = gr.Button(reuse_symbol, elem_id='reuse_seed')
with gr.Box(elem_id='subseed_show_box'):
seed_checkbox = gr.Checkbox(label='Extra', elem_id='subseed_show', value=False)
# Components to show/hide based on the 'Extra' checkbox
seed_extras = []
with gr.Row(visible=False) as seed_extra_row_1:
seed_extras.append(seed_extra_row_1)
with gr.Box():
with gr.Row(elem_id='subseed_row'):
subseed = gr.Number(label='Variation seed', value=-1)
subseed.style(container=False)
random_subseed = gr.Button(random_symbol, elem_id='random_subseed')
reuse_subseed = gr.Button(reuse_symbol, elem_id='reuse_subseed')
subseed_strength = gr.Slider(label='Variation strength', value=0.0, minimum=0, maximum=1, step=0.01)
with gr.Row(visible=False) as seed_extra_row_2:
seed_extras.append(seed_extra_row_2)
seed_resize_from_w = gr.Slider(minimum=0, maximum=2048, step=64, label="Resize seed from width", value=0)
seed_resize_from_h = gr.Slider(minimum=0, maximum=2048, step=64, label="Resize seed from height", value=0)
random_seed.click(fn=lambda: -1, show_progress=False, inputs=[], outputs=[seed])
random_subseed.click(fn=lambda: -1, show_progress=False, inputs=[], outputs=[subseed])
def change_visibility(show):
return {comp: gr_show(show) for comp in seed_extras}
seed_checkbox.change(change_visibility, show_progress=False, inputs=[seed_checkbox], outputs=seed_extras)
return seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox
def connect_reuse_seed(seed: gr.Number, reuse_seed: gr.Button, generation_info: gr.Textbox, dummy_component, is_subseed):
""" Connects a 'reuse (sub)seed' button's click event so that it copies last used
(sub)seed value from the generation info to the seed field. If copying a subseed and the subseed strength
was 0, i.e. no variation seed was used, it copies the normal seed value instead."""
def copy_seed(gen_info_string: str, index):
res = -1
try:
gen_info = json.loads(gen_info_string)
index -= gen_info.get('index_of_first_image', 0)
if is_subseed and gen_info.get('subseed_strength', 0) > 0:
all_subseeds = gen_info.get('all_subseeds', [-1])
res = all_subseeds[index if 0 <= index < len(all_subseeds) else 0]
else:
all_seeds = gen_info.get('all_seeds', [-1])
res = all_seeds[index if 0 <= index < len(all_seeds) else 0]
except json.decoder.JSONDecodeError as e:
if gen_info_string != '':
print("Error parsing JSON generation info:", file=sys.stderr)
print(gen_info_string, file=sys.stderr)
return [res, gr_show(False)]
reuse_seed.click(
fn=copy_seed,
_js="(x, y) => [x, selected_gallery_index()]",
show_progress=False,
inputs=[generation_info, dummy_component],
outputs=[seed, dummy_component]
)
def create_toprow(is_img2img):
with gr.Row(elem_id="toprow"):
with gr.Column(scale=4):
with gr.Row():
with gr.Column(scale=8):
with gr.Row():
prompt = gr.Textbox(label="Prompt", elem_id="prompt", show_label=False, placeholder="Prompt", lines=2)
roll = gr.Button('Roll', elem_id="roll", visible=len(shared.artist_db.artists) > 0)
with gr.Column(scale=1, elem_id="style_pos_col"):
prompt_style = gr.Dropdown(label="Style 1", elem_id="style_index", choices=[k for k, v in shared.prompt_styles.styles.items()], value=next(iter(shared.prompt_styles.styles.keys())), visible=len(shared.prompt_styles.styles) > 1)
with gr.Row():
with gr.Column(scale=8):
negative_prompt = gr.Textbox(label="Negative prompt", elem_id="negative_prompt", show_label=False, placeholder="Negative prompt", lines=2)
with gr.Column(scale=1, elem_id="style_neg_col"):
prompt_style2 = gr.Dropdown(label="Style 2", elem_id="style2_index", choices=[k for k, v in shared.prompt_styles.styles.items()], value=next(iter(shared.prompt_styles.styles.keys())), visible=len(shared.prompt_styles.styles) > 1)
with gr.Column(scale=1):
with gr.Row():
interrupt = gr.Button('Interrupt', elem_id="interrupt")
submit = gr.Button('Generate', elem_id="generate", variant='primary')
interrupt.click(
fn=lambda: shared.state.interrupt(),
inputs=[],
outputs=[],
)
with gr.Row():
if is_img2img:
interrogate = gr.Button('Interrogate', elem_id="interrogate")
else:
interrogate = None
prompt_style_apply = gr.Button('Apply style', elem_id="style_apply")
save_style = gr.Button('Create style', elem_id="style_create")
return prompt, roll, prompt_style, negative_prompt, prompt_style2, submit, interrogate, prompt_style_apply, save_style
def setup_progressbar(progressbar, preview):
check_progress = gr.Button('Check progress', elem_id="check_progress", visible=False)
check_progress.click(
fn=check_progress_call,
show_progress=False,
inputs=[],
outputs=[progressbar, preview, preview],
)
check_progress_initial = gr.Button('Check progress (first)', elem_id="check_progress_initial", visible=False)
check_progress_initial.click(
fn=check_progress_call_initial,
show_progress=False,
inputs=[],
outputs=[progressbar, preview, preview],
)
def create_ui(txt2img, img2img, run_extras, run_pnginfo):
with gr.Blocks(analytics_enabled=False) as txt2img_interface:
txt2img_prompt, roll, txt2img_prompt_style, txt2img_negative_prompt, txt2img_prompt_style2, submit, _, txt2img_prompt_style_apply, txt2img_save_style = create_toprow(is_img2img=False)
dummy_component = gr.Label(visible=False)
with gr.Row(elem_id='progressRow'):
with gr.Column(scale=1):
columnEmpty = "Empty"
with gr.Column(scale=1):
progressbar = gr.HTML(elem_id="progressbar")
txt2img_preview = gr.Image(elem_id='txt2img_preview', visible=False)
setup_progressbar(progressbar, txt2img_preview)
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
steps = gr.Slider(minimum=1, maximum=150, step=1, label="Sampling Steps", value=20)
sampler_index = gr.Radio(label='Sampling method', elem_id="txt2img_sampling", choices=[x.name for x in samplers], value=samplers[0].name, type="index")
with gr.Row():
restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1)
tiling = gr.Checkbox(label='Tiling', value=False)
enable_hr = gr.Checkbox(label='Highres. fix', value=False)
with gr.Row(visible=False) as hr_options:
scale_latent = gr.Checkbox(label='Scale latent', value=False)
denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7)
with gr.Row():
batch_count = gr.Slider(minimum=1, maximum=cmd_opts.max_batch_count, step=1, label='Batch count', value=1)
batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1)
cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0)
with gr.Group():
width = gr.Slider(minimum=64, maximum=2048, step=64, label="Width", value=512)
height = gr.Slider(minimum=64, maximum=2048, step=64, label="Height", value=512)
seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs()
with gr.Group():
custom_inputs = modules.scripts.scripts_txt2img.setup_ui(is_img2img=False)
with gr.Column(variant='panel'):
with gr.Group():
txt2img_preview = gr.Image(elem_id='txt2img_preview', visible=False)
txt2img_gallery = gr.Gallery(label='Output', show_label=False, elem_id='txt2img_gallery').style(grid=4)
with gr.Group():
with gr.Row():
save = gr.Button('Save')
send_to_img2img = gr.Button('Send to img2img')
send_to_inpaint = gr.Button('Send to inpaint')
send_to_extras = gr.Button('Send to extras')
with gr.Group():
html_info = gr.HTML()
generation_info = gr.Textbox(visible=False)
connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False)
connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True)
txt2img_args = dict(
fn=txt2img,
_js="submit",
inputs=[
txt2img_prompt,
txt2img_negative_prompt,
txt2img_prompt_style,
txt2img_prompt_style2,
steps,
sampler_index,
restore_faces,
tiling,
batch_count,
batch_size,
cfg_scale,
seed,
subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox,
height,
width,
enable_hr,
scale_latent,
denoising_strength,
] + custom_inputs,
outputs=[
txt2img_gallery,
generation_info,
html_info
],
show_progress=False,
)
txt2img_prompt.submit(**txt2img_args)
submit.click(**txt2img_args)
enable_hr.change(
fn=lambda x: gr_show(x),
inputs=[enable_hr],
outputs=[hr_options],
)
save.click(
fn=wrap_gradio_call(save_files),
_js="(x, y, z) => [x, y, selected_gallery_index()]",
inputs=[
generation_info,
txt2img_gallery,
html_info,
],
outputs=[
html_info,
html_info,
html_info,
]
)
roll.click(
fn=roll_artist,
inputs=[
txt2img_prompt,
],
outputs=[
txt2img_prompt,
]
)
with gr.Blocks(analytics_enabled=False) as img2img_interface:
img2img_prompt, roll, img2img_prompt_style, img2img_negative_prompt, img2img_prompt_style2, submit, img2img_interrogate, img2img_prompt_style_apply, img2img_save_style = create_toprow(is_img2img=True)
with gr.Row(elem_id='progressRow'):
with gr.Column(scale=1):
columnEmpty = "Empty"
with gr.Column(scale=1):
progressbar = gr.HTML(elem_id="progressbar")
img2img_preview = gr.Image(elem_id='img2img_preview', visible=False)
setup_progressbar(progressbar, img2img_preview)
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
with gr.Tabs(elem_id="mode_img2img") as tabs_img2img_mode:
with gr.TabItem('img2img'):
init_img = gr.Image(label="Image for img2img", show_label=False, source="upload", interactive=True, type="pil")
with gr.TabItem('Inpaint'):
init_img_with_mask = gr.Image(label="Image for inpainting with mask", show_label=False, elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool="sketch", image_mode="RGBA")
init_img_with_mask_comment = gr.HTML(elem_id="mask_bug_info", value="<small>if the editor shows ERROR, switch to another tab and back, then to \"Upload mask\" mode above and back</small>")
init_img_inpaint = gr.Image(label="Image for img2img", show_label=False, source="upload", interactive=True, type="pil", visible=False)
init_mask_inpaint = gr.Image(label="Mask", source="upload", interactive=True, type="pil", visible=False)
mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4)
with gr.Row():
mask_mode = gr.Radio(label="Mask mode", show_label=False, choices=["Draw mask", "Upload mask"], type="index", value="Draw mask")
inpainting_mask_invert = gr.Radio(label='Masking mode', show_label=False, choices=['Inpaint masked', 'Inpaint not masked'], value='Inpaint masked', type="index")
inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='fill', type="index")
with gr.Row():
inpaint_full_res = gr.Checkbox(label='Inpaint at full resolution', value=False)
inpaint_full_res_padding = gr.Slider(label='Inpaint at full resolution padding, pixels', minimum=0, maximum=256, step=4, value=32)
with gr.TabItem('Batch img2img'):
gr.HTML("<p class=\"text-gray-500\">Process images in a directory on the same machine where the server is running.</p>")
img2img_batch_input_dir = gr.Textbox(label="Input directory")
img2img_batch_output_dir = gr.Textbox(label="Output directory")
with gr.Row():
resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", show_label=False, choices=["Just resize", "Crop and resize", "Resize and fill"], type="index", value="Just resize")
steps = gr.Slider(minimum=1, maximum=150, step=1, label="Sampling Steps", value=20)
sampler_index = gr.Radio(label='Sampling method', choices=[x.name for x in samplers_for_img2img], value=samplers_for_img2img[0].name, type="index")
with gr.Row():
restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1)
tiling = gr.Checkbox(label='Tiling', value=False)
with gr.Row():
batch_count = gr.Slider(minimum=1, maximum=cmd_opts.max_batch_count, step=1, label='Batch count', value=1)
batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1)
with gr.Group():
cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0)
denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75)
with gr.Group():
width = gr.Slider(minimum=64, maximum=2048, step=64, label="Width", value=512)
height = gr.Slider(minimum=64, maximum=2048, step=64, label="Height", value=512)
seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs()
with gr.Group():
custom_inputs = modules.scripts.scripts_img2img.setup_ui(is_img2img=True)
with gr.Column(variant='panel'):
with gr.Group():
img2img_preview = gr.Image(elem_id='img2img_preview', visible=False)
img2img_gallery = gr.Gallery(label='Output', show_label=False, elem_id='img2img_gallery').style(grid=4)
with gr.Group():
with gr.Row():
save = gr.Button('Save')
img2img_send_to_img2img = gr.Button('Send to img2img')
img2img_send_to_inpaint = gr.Button('Send to inpaint')
img2img_send_to_extras = gr.Button('Send to extras')
img2img_save_style = gr.Button('Save prompt as style')
with gr.Group():
html_info = gr.HTML()
generation_info = gr.Textbox(visible=False)
connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False)
connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True)
mask_mode.change(
lambda mode, img: {
#init_img_with_mask: gr.Image.update(visible=mode == 0, value=img["image"]),
init_img_with_mask: gr_show(mode == 0),
init_img_with_mask_comment: gr_show(mode == 0),
init_img_inpaint: gr_show(mode == 1),
init_mask_inpaint: gr_show(mode == 1),
},
inputs=[mask_mode, init_img_with_mask],
outputs=[
init_img_with_mask,
init_img_with_mask_comment,
init_img_inpaint,
init_mask_inpaint,
],
)
img2img_args = dict(
fn=img2img,
_js="submit_img2img",
inputs=[
dummy_component,
img2img_prompt,
img2img_negative_prompt,
img2img_prompt_style,
img2img_prompt_style2,
init_img,
init_img_with_mask,
init_img_inpaint,
init_mask_inpaint,
mask_mode,
steps,
sampler_index,
mask_blur,
inpainting_fill,
restore_faces,
tiling,
batch_count,
batch_size,
cfg_scale,
denoising_strength,
seed,
subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox,
height,
width,
resize_mode,
inpaint_full_res,
inpaint_full_res_padding,
inpainting_mask_invert,
img2img_batch_input_dir,
img2img_batch_output_dir,
] + custom_inputs,
outputs=[
img2img_gallery,
generation_info,
html_info
],
show_progress=False,
)
img2img_prompt.submit(**img2img_args)
submit.click(**img2img_args)
img2img_interrogate.click(
fn=interrogate,
inputs=[init_img],
outputs=[img2img_prompt],
)
save.click(
fn=wrap_gradio_call(save_files),
_js="(x, y, z) => [x, y, selected_gallery_index()]",
inputs=[
generation_info,
img2img_gallery,
html_info
],
outputs=[
html_info,
html_info,
html_info,
]
)
roll.click(
fn=roll_artist,
inputs=[
img2img_prompt,
],
outputs=[
img2img_prompt,
]
)
prompts = [(txt2img_prompt, txt2img_negative_prompt), (img2img_prompt, img2img_negative_prompt)]
style_dropdowns = [(txt2img_prompt_style, txt2img_prompt_style2), (img2img_prompt_style, img2img_prompt_style2)]
for button, (prompt, negative_prompt) in zip([txt2img_save_style, img2img_save_style], prompts):
button.click(
fn=add_style,
_js="ask_for_style_name",
# Have to pass empty dummy component here, because the JavaScript and Python function have to accept
# the same number of parameters, but we only know the style-name after the JavaScript prompt
inputs=[dummy_component, prompt, negative_prompt],
outputs=[txt2img_prompt_style, img2img_prompt_style, txt2img_prompt_style2, img2img_prompt_style2],
)
for button, (prompt, negative_prompt), (style1, style2) in zip([txt2img_prompt_style_apply, img2img_prompt_style_apply], prompts, style_dropdowns):
button.click(
fn=apply_styles,
inputs=[prompt, negative_prompt, style1, style2],
outputs=[prompt, negative_prompt, style1, style2],
)
with gr.Blocks(analytics_enabled=False) as extras_interface:
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
with gr.Tabs(elem_id="mode_extras"):
with gr.TabItem('Single Image'):
image = gr.Image(label="Source", source="upload", interactive=True, type="pil")
with gr.TabItem('Batch Process'):
image_batch = gr.File(label="Batch Process", file_count="multiple", interactive=True, type="file")
upscaling_resize = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Resize", value=2)
with gr.Group():
extras_upscaler_1 = gr.Radio(label='Upscaler 1', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index")
with gr.Group():
extras_upscaler_2 = gr.Radio(label='Upscaler 2', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index")
extras_upscaler_2_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="Upscaler 2 visibility", value=1)
with gr.Group():
gfpgan_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="GFPGAN visibility", value=0, interactive=modules.gfpgan_model.have_gfpgan)
with gr.Group():
codeformer_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="CodeFormer visibility", value=0, interactive=modules.codeformer_model.have_codeformer)
codeformer_weight = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="CodeFormer weight (0 = maximum effect, 1 = minimum effect)", value=0, interactive=modules.codeformer_model.have_codeformer)
submit = gr.Button('Generate', elem_id="extras_generate", variant='primary')
with gr.Column(variant='panel'):
result_images = gr.Gallery(label="Result", show_label=False)
html_info_x = gr.HTML()
html_info = gr.HTML()
submit.click(
fn=run_extras,
_js="get_extras_tab_index",
inputs=[
dummy_component,
image,
image_batch,
gfpgan_visibility,
codeformer_visibility,
codeformer_weight,
upscaling_resize,
extras_upscaler_1,
extras_upscaler_2,
extras_upscaler_2_visibility,
],
outputs=[
result_images,
html_info_x,
html_info,
]
)
pnginfo_interface = gr.Interface(
wrap_gradio_call(run_pnginfo),
inputs=[
gr.Image(elem_id="pnginfo_image", label="Source", source="upload", interactive=True, type="pil"),
],
outputs=[
gr.HTML(),
gr.HTML(),
gr.HTML(),
],
allow_flagging="never",
analytics_enabled=False,
live=True,
)
def create_setting_component(key):
def fun():
return opts.data[key] if key in opts.data else opts.data_labels[key].default
info = opts.data_labels[key]
t = type(info.default)
args = info.component_args() if callable(info.component_args) else info.component_args
if info.component is not None:
comp = info.component
elif t == str:
comp = gr.Textbox
elif t == int:
comp = gr.Number
elif t == bool:
comp = gr.Checkbox
else:
raise Exception(f'bad options item type: {str(t)} for key {key}')
return comp(label=info.label, value=fun, **(args or {}))
components = []
def run_settings(*args):
up = []
for key, value, comp in zip(opts.data_labels.keys(), args, components):
comp_args = opts.data_labels[key].component_args
if comp_args and isinstance(comp_args, dict) and comp_args.get('visible') is False:
continue
oldval = opts.data.get(key, None)
opts.data[key] = value
if oldval != value and opts.data_labels[key].onchange is not None:
opts.data_labels[key].onchange()
up.append(comp.update(value=value))
opts.save(shared.config_filename)
return 'Settings applied.'
with gr.Blocks(analytics_enabled=False) as settings_interface:
settings_submit = gr.Button(value="Apply settings", variant='primary')
result = gr.HTML()
settings_cols = 3
items_per_col = int(len(opts.data_labels) * 0.9 / settings_cols)
cols_displayed = 0
items_displayed = 0
previous_section = None
column = None
with gr.Row(elem_id="settings").style(equal_height=False):
for i, (k, item) in enumerate(opts.data_labels.items()):
if previous_section != item.section:
if cols_displayed < settings_cols and (items_displayed >= items_per_col or previous_section is None):
if column is not None:
column.__exit__()
column = gr.Column(variant='panel')
column.__enter__()
items_displayed = 0
cols_displayed += 1
previous_section = item.section
gr.HTML(elem_id="settings_header_text_{}".format(item.section[0]), value='<h1 class="gr-button-lg">{}</h1>'.format(item.section[1]))
components.append(create_setting_component(k))
items_displayed += 1
request_notifications = gr.Button(value='Request browser notifications', elem_id="request_notifications")
request_notifications.click(
fn=lambda: None,
inputs=[],
outputs=[],
_js='function(){}'
)
if column is not None:
column.__exit__()
settings_submit.click(
fn=run_settings,
inputs=components,
outputs=[result]
)
interfaces = [
(txt2img_interface, "txt2img", "txt2img"),
(img2img_interface, "img2img", "img2img"),
(extras_interface, "Extras", "extras"),
(pnginfo_interface, "PNG Info", "pnginfo"),
(settings_interface, "Settings", "settings"),
]
with open(os.path.join(script_path, "style.css"), "r", encoding="utf8") as file:
css = file.read()
if os.path.exists(os.path.join(script_path, "user.css")):
with open(os.path.join(script_path, "user.css"), "r", encoding="utf8") as file:
usercss = file.read()
css += usercss
if not cmd_opts.no_progressbar_hiding:
css += css_hide_progressbar
with gr.Blocks(css=css, analytics_enabled=False, title="Stable Diffusion") as demo:
with gr.Tabs() as tabs:
for interface, label, ifid in interfaces:
with gr.TabItem(label, id=ifid):
interface.render()
text_settings = gr.Textbox(elem_id="settings_json", value=lambda: opts.dumpjson(), visible=False)
settings_submit.click(
fn=lambda: opts.dumpjson(),
inputs=[],
outputs=[text_settings],
)
tabs.change(
fn=lambda x: x,
inputs=[init_img_with_mask],
outputs=[init_img_with_mask],
)
tabs_img2img_mode.change(
fn=lambda x: x,
inputs=[init_img_with_mask],
outputs=[init_img_with_mask],
)
send_to_img2img.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_img2img",
inputs=[txt2img_gallery],
outputs=[init_img],
)
send_to_inpaint.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_img2img",
inputs=[txt2img_gallery],
outputs=[init_img_with_mask],
)
img2img_send_to_img2img.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery",
inputs=[img2img_gallery],
outputs=[init_img],
)
img2img_send_to_inpaint.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery",
inputs=[img2img_gallery],
outputs=[init_img_with_mask],
)
send_to_extras.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_extras",
inputs=[txt2img_gallery],
outputs=[image],
)
img2img_send_to_extras.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_extras",
inputs=[img2img_gallery],
outputs=[image],
)
ui_config_file = cmd_opts.ui_config_file
ui_settings = {}
settings_count = len(ui_settings)
error_loading = False
try:
if os.path.exists(ui_config_file):
with open(ui_config_file, "r", encoding="utf8") as file:
ui_settings = json.load(file)
except Exception:
error_loading = True
print("Error loading settings:", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
def loadsave(path, x):
def apply_field(obj, field, condition=None):
key = path + "/" + field
saved_value = ui_settings.get(key, None)
if saved_value is None:
ui_settings[key] = getattr(obj, field)
elif condition is None or condition(saved_value):
setattr(obj, field, saved_value)
if type(x) == gr.Slider:
apply_field(x, 'value')
apply_field(x, 'minimum')
apply_field(x, 'maximum')
apply_field(x, 'step')
if type(x) == gr.Radio:
apply_field(x, 'value', lambda val: val in x.choices)
visit(txt2img_interface, loadsave, "txt2img")
visit(img2img_interface, loadsave, "img2img")
visit(extras_interface, loadsave, "extras")
if not error_loading and (not os.path.exists(ui_config_file) or settings_count != len(ui_settings)):
with open(ui_config_file, "w", encoding="utf8") as file:
json.dump(ui_settings, file, indent=4)
return demo
with open(os.path.join(script_path, "script.js"), "r", encoding="utf8") as jsfile:
javascript = f'<script>{jsfile.read()}</script>'
jsdir = os.path.join(script_path, "javascript")
for filename in os.listdir(jsdir):
with open(os.path.join(jsdir, filename), "r", encoding="utf8") as jsfile:
javascript += f"\n<script>{jsfile.read()}</script>"
def template_response(*args, **kwargs):
res = gradio_routes_templates_response(*args, **kwargs)
res.body = res.body.replace(b'</head>', f'{javascript}</head>'.encode("utf8"))
res.init_headers()
return res
gradio_routes_templates_response = gradio.routes.templates.TemplateResponse
gradio.routes.templates.TemplateResponse = template_response
<|code_end|>
|
Request: something akin to a batch ID for folder name pattern
**Is your feature request related to a problem? Please describe.**
I've been experimenting with the wildcards script linked in the wiki lately.
It's wonderful but unfortunately, it's wreaking havoc on the folder structure in my outputs/txt2img-images folder since every single image of a batch is likely to have a different prompt.
Another problem this feature request would solve is running the same prompt (or at least one with a similar start) multiple times. With the current filename patterns available, multiple batches all get sorted into the same folder. This is admittedly not a huge annoyance for most users, but it can be an issue, or at least unexpected behavior, when today's prompt gets sorted into a folder from three days ago just because you reused a prompt.
**Describe the solution you'd like**
Generate an ID when starting a batch and make that available as a folder name pattern.
I don't care much about how that ID is formatted. I'd be fine with a random string of, let's say, 5 hexadecimal characters.
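As a rough illustration of the idea (not actual webui code), the ID could be generated once per batch and substituted into the directory-name pattern; the `[batch_id]` token and the helper below are hypothetical:
```python
import secrets

def new_batch_id(length: int = 5) -> str:
    # A short random hex string, one per batch (5 characters as suggested above).
    return secrets.token_hex((length + 1) // 2)[:length]

def apply_dir_pattern(pattern: str, prompt: str, batch_id: str) -> str:
    # Hypothetical pattern substitution; "[prompt]" and "[batch_id]" are illustrative tokens.
    return pattern.replace("[prompt]", prompt).replace("[batch_id]", batch_id)

batch_id = new_batch_id()
print(apply_dir_pattern("[batch_id]/[prompt]", "a mountain sketch", batch_id))
# e.g. "a3f9c/a mountain sketch" -- every image of the batch lands in the same folder
```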
**Describe alternatives you've considered**
I don't believe there is a suitable alternative that lets a user group images by batch without the risk of accidentally including images from other batches, at least not with the currently available file/directory patterns.
**Additional context**
All of these are from just one wildcard batch of 20, all containing just a single image.

My output directory pattern is set up in such a way that I have a folder for every day. As you can see, the subfolder just for *today* has >1000 subfolders. It's hard to keep track of things like this.

|
modules/images.py
<|code_start|>import datetime
import math
import os
from collections import namedtuple
import re
import numpy as np
import piexif
import piexif.helper
from PIL import Image, ImageFont, ImageDraw, PngImagePlugin
from fonts.ttf import Roboto
import string
import modules.shared
from modules import sd_samplers, shared
from modules.shared import opts, cmd_opts
LANCZOS = (Image.Resampling.LANCZOS if hasattr(Image, 'Resampling') else Image.LANCZOS)
def image_grid(imgs, batch_size=1, rows=None):
if rows is None:
if opts.n_rows > 0:
rows = opts.n_rows
elif opts.n_rows == 0:
rows = batch_size
else:
rows = math.sqrt(len(imgs))
rows = round(rows)
cols = math.ceil(len(imgs) / rows)
w, h = imgs[0].size
grid = Image.new('RGB', size=(cols * w, rows * h), color='black')
for i, img in enumerate(imgs):
grid.paste(img, box=(i % cols * w, i // cols * h))
return grid
Grid = namedtuple("Grid", ["tiles", "tile_w", "tile_h", "image_w", "image_h", "overlap"])
def split_grid(image, tile_w=512, tile_h=512, overlap=64):
w = image.width
h = image.height
non_overlap_width = tile_w - overlap
non_overlap_height = tile_h - overlap
cols = math.ceil((w - overlap) / non_overlap_width)
rows = math.ceil((h - overlap) / non_overlap_height)
dx = (w - tile_w) / (cols-1) if cols > 1 else 0
dy = (h - tile_h) / (rows-1) if rows > 1 else 0
grid = Grid([], tile_w, tile_h, w, h, overlap)
for row in range(rows):
row_images = []
y = int(row * dy)
if y + tile_h >= h:
y = h - tile_h
for col in range(cols):
x = int(col * dx)
if x+tile_w >= w:
x = w - tile_w
tile = image.crop((x, y, x + tile_w, y + tile_h))
row_images.append([x, tile_w, tile])
grid.tiles.append([y, tile_h, row_images])
return grid
def combine_grid(grid):
def make_mask_image(r):
r = r * 255 / grid.overlap
r = r.astype(np.uint8)
return Image.fromarray(r, 'L')
mask_w = make_mask_image(np.arange(grid.overlap, dtype=np.float32).reshape((1, grid.overlap)).repeat(grid.tile_h, axis=0))
mask_h = make_mask_image(np.arange(grid.overlap, dtype=np.float32).reshape((grid.overlap, 1)).repeat(grid.image_w, axis=1))
combined_image = Image.new("RGB", (grid.image_w, grid.image_h))
for y, h, row in grid.tiles:
combined_row = Image.new("RGB", (grid.image_w, h))
for x, w, tile in row:
if x == 0:
combined_row.paste(tile, (0, 0))
continue
combined_row.paste(tile.crop((0, 0, grid.overlap, h)), (x, 0), mask=mask_w)
combined_row.paste(tile.crop((grid.overlap, 0, w, h)), (x + grid.overlap, 0))
if y == 0:
combined_image.paste(combined_row, (0, 0))
continue
combined_image.paste(combined_row.crop((0, 0, combined_row.width, grid.overlap)), (0, y), mask=mask_h)
combined_image.paste(combined_row.crop((0, grid.overlap, combined_row.width, h)), (0, y + grid.overlap))
return combined_image
class GridAnnotation:
def __init__(self, text='', is_active=True):
self.text = text
self.is_active = is_active
self.size = None
def draw_grid_annotations(im, width, height, hor_texts, ver_texts):
def wrap(drawing, text, font, line_length):
lines = ['']
for word in text.split():
line = f'{lines[-1]} {word}'.strip()
if drawing.textlength(line, font=font) <= line_length:
lines[-1] = line
else:
lines.append(word)
return lines
def draw_texts(drawing, draw_x, draw_y, lines):
for i, line in enumerate(lines):
drawing.multiline_text((draw_x, draw_y + line.size[1] / 2), line.text, font=fnt, fill=color_active if line.is_active else color_inactive, anchor="mm", align="center")
if not line.is_active:
drawing.line((draw_x - line.size[0]//2, draw_y + line.size[1]//2, draw_x + line.size[0]//2, draw_y + line.size[1]//2), fill=color_inactive, width=4)
draw_y += line.size[1] + line_spacing
fontsize = (width + height) // 25
line_spacing = fontsize // 2
try:
fnt = ImageFont.truetype(opts.font or Roboto, fontsize)
except Exception:
fnt = ImageFont.truetype(Roboto, fontsize)
color_active = (0, 0, 0)
color_inactive = (153, 153, 153)
pad_left = 0 if sum([sum([len(line.text) for line in lines]) for lines in ver_texts]) == 0 else width * 3 // 4
cols = im.width // width
rows = im.height // height
assert cols == len(hor_texts), f'bad number of horizontal texts: {len(hor_texts)}; must be {cols}'
assert rows == len(ver_texts), f'bad number of vertical texts: {len(ver_texts)}; must be {rows}'
calc_img = Image.new("RGB", (1, 1), "white")
calc_d = ImageDraw.Draw(calc_img)
for texts, allowed_width in zip(hor_texts + ver_texts, [width] * len(hor_texts) + [pad_left] * len(ver_texts)):
items = [] + texts
texts.clear()
for line in items:
wrapped = wrap(calc_d, line.text, fnt, allowed_width)
texts += [GridAnnotation(x, line.is_active) for x in wrapped]
for line in texts:
bbox = calc_d.multiline_textbbox((0, 0), line.text, font=fnt)
line.size = (bbox[2] - bbox[0], bbox[3] - bbox[1])
hor_text_heights = [sum([line.size[1] + line_spacing for line in lines]) - line_spacing for lines in hor_texts]
ver_text_heights = [sum([line.size[1] + line_spacing for line in lines]) - line_spacing * len(lines) for lines in ver_texts]
pad_top = max(hor_text_heights) + line_spacing * 2
result = Image.new("RGB", (im.width + pad_left, im.height + pad_top), "white")
result.paste(im, (pad_left, pad_top))
d = ImageDraw.Draw(result)
for col in range(cols):
x = pad_left + width * col + width / 2
y = pad_top / 2 - hor_text_heights[col] / 2
draw_texts(d, x, y, hor_texts[col])
for row in range(rows):
x = pad_left / 2
y = pad_top + height * row + height / 2 - ver_text_heights[row] / 2
draw_texts(d, x, y, ver_texts[row])
return result
def draw_prompt_matrix(im, width, height, all_prompts):
prompts = all_prompts[1:]
boundary = math.ceil(len(prompts) / 2)
prompts_horiz = prompts[:boundary]
prompts_vert = prompts[boundary:]
hor_texts = [[GridAnnotation(x, is_active=pos & (1 << i) != 0) for i, x in enumerate(prompts_horiz)] for pos in range(1 << len(prompts_horiz))]
ver_texts = [[GridAnnotation(x, is_active=pos & (1 << i) != 0) for i, x in enumerate(prompts_vert)] for pos in range(1 << len(prompts_vert))]
return draw_grid_annotations(im, width, height, hor_texts, ver_texts)
def resize_image(resize_mode, im, width, height):
def resize(im, w, h):
if opts.upscaler_for_img2img is None or opts.upscaler_for_img2img == "None" or im.mode == 'L':
return im.resize((w, h), resample=LANCZOS)
upscaler = [x for x in shared.sd_upscalers if x.name == opts.upscaler_for_img2img][0]
return upscaler.upscale(im, w, h)
if resize_mode == 0:
res = resize(im, width, height)
elif resize_mode == 1:
ratio = width / height
src_ratio = im.width / im.height
src_w = width if ratio > src_ratio else im.width * height // im.height
src_h = height if ratio <= src_ratio else im.height * width // im.width
resized = resize(im, src_w, src_h)
res = Image.new("RGB", (width, height))
res.paste(resized, box=(width // 2 - src_w // 2, height // 2 - src_h // 2))
else:
ratio = width / height
src_ratio = im.width / im.height
src_w = width if ratio < src_ratio else im.width * height // im.height
src_h = height if ratio >= src_ratio else im.height * width // im.width
resized = resize(im, src_w, src_h)
res = Image.new("RGB", (width, height))
res.paste(resized, box=(width // 2 - src_w // 2, height // 2 - src_h // 2))
if ratio < src_ratio:
fill_height = height // 2 - src_h // 2
res.paste(resized.resize((width, fill_height), box=(0, 0, width, 0)), box=(0, 0))
res.paste(resized.resize((width, fill_height), box=(0, resized.height, width, resized.height)), box=(0, fill_height + src_h))
elif ratio > src_ratio:
fill_width = width // 2 - src_w // 2
res.paste(resized.resize((fill_width, height), box=(0, 0, 0, height)), box=(0, 0))
res.paste(resized.resize((fill_width, height), box=(resized.width, 0, resized.width, height)), box=(fill_width + src_w, 0))
return res
invalid_filename_chars = '<>:"/\\|?*\n'
invalid_filename_prefix = ' '
invalid_filename_postfix = ' .'
re_nonletters = re.compile(r'[\s'+string.punctuation+']+')
max_filename_part_length = 128
def sanitize_filename_part(text, replace_spaces=True):
if replace_spaces:
text = text.replace(' ', '_')
text = text.translate({ord(x): '_' for x in invalid_filename_chars})
text = text.lstrip(invalid_filename_prefix)[:max_filename_part_length]
text = text.rstrip(invalid_filename_postfix)
return text
def apply_filename_pattern(x, p, seed, prompt):
max_prompt_words = opts.directories_max_prompt_words
if seed is not None:
x = x.replace("[seed]", str(seed))
if prompt is not None:
x = x.replace("[prompt]", sanitize_filename_part(prompt))
x = x.replace("[prompt_spaces]", sanitize_filename_part(prompt, replace_spaces=False))
if "[prompt_words]" in x:
words = [x for x in re_nonletters.split(prompt or "") if len(x) > 0]
if len(words) == 0:
words = ["empty"]
x = x.replace("[prompt_words]", sanitize_filename_part(" ".join(words[0:max_prompt_words]), replace_spaces=False))
if p is not None:
x = x.replace("[steps]", str(p.steps))
x = x.replace("[cfg]", str(p.cfg_scale))
x = x.replace("[width]", str(p.width))
x = x.replace("[height]", str(p.height))
x = x.replace("[styles]", sanitize_filename_part(", ".join(p.styles), replace_spaces=False))
x = x.replace("[sampler]", sanitize_filename_part(sd_samplers.samplers[p.sampler_index].name, replace_spaces=False))
x = x.replace("[model_hash]", shared.sd_model.sd_model_hash)
x = x.replace("[date]", datetime.date.today().isoformat())
if cmd_opts.hide_ui_dir_config:
x = re.sub(r'^[\\/]+|\.{2,}[\\/]+|[\\/]+\.{2,}', '', x)
return x
def get_next_sequence_number(path, basename):
"""
Determines and returns the next sequence number to use when saving an image in the specified directory.
The sequence starts at 0.
"""
result = -1
if basename != '':
basename = basename + "-"
prefix_length = len(basename)
for p in os.listdir(path):
if p.startswith(basename):
l = os.path.splitext(p[prefix_length:])[0].split('-') #splits the filename (removing the basename first if one is defined, so the sequence number is always the first element)
try:
result = max(int(l[0]), result)
except ValueError:
pass
return result + 1
def save_image(image, path, basename, seed=None, prompt=None, extension='png', info=None, short_filename=False, no_prompt=False, grid=False, pnginfo_section_name='parameters', p=None, existing_info=None, forced_filename=None, suffix=""):
if short_filename or prompt is None or seed is None:
file_decoration = ""
elif opts.save_to_dirs:
file_decoration = opts.samples_filename_pattern or "[seed]"
else:
file_decoration = opts.samples_filename_pattern or "[seed]-[prompt_spaces]"
if file_decoration != "":
file_decoration = "-" + file_decoration.lower()
file_decoration = apply_filename_pattern(file_decoration, p, seed, prompt) + suffix
if extension == 'png' and opts.enable_pnginfo and info is not None:
pnginfo = PngImagePlugin.PngInfo()
if existing_info is not None:
for k, v in existing_info.items():
pnginfo.add_text(k, str(v))
pnginfo.add_text(pnginfo_section_name, info)
else:
pnginfo = None
save_to_dirs = (grid and opts.grid_save_to_dirs) or (not grid and opts.save_to_dirs and not no_prompt)
if save_to_dirs:
dirname = apply_filename_pattern(opts.directories_filename_pattern or "[prompt_words]", p, seed, prompt)
path = os.path.join(path, dirname)
os.makedirs(path, exist_ok=True)
if forced_filename is None:
basecount = get_next_sequence_number(path, basename)
fullfn = "a.png"
fullfn_without_extension = "a"
for i in range(500):
fn = f"{basecount+i:05}" if basename == '' else f"{basename}-{basecount+i:04}"
fullfn = os.path.join(path, f"{fn}{file_decoration}.{extension}")
fullfn_without_extension = os.path.join(path, f"{fn}{file_decoration}")
if not os.path.exists(fullfn):
break
else:
fullfn = os.path.join(path, f"{forced_filename}.{extension}")
fullfn_without_extension = os.path.join(path, forced_filename)
def exif_bytes():
return piexif.dump({
"Exif": {
piexif.ExifIFD.UserComment: piexif.helper.UserComment.dump(info or "", encoding="unicode")
},
})
if extension.lower() in ("jpg", "jpeg", "webp"):
image.save(fullfn, quality=opts.jpeg_quality)
if opts.enable_pnginfo and info is not None:
piexif.insert(exif_bytes(), fullfn)
else:
image.save(fullfn, quality=opts.jpeg_quality, pnginfo=pnginfo)
target_side_length = 4000
oversize = image.width > target_side_length or image.height > target_side_length
if opts.export_for_4chan and (oversize or os.stat(fullfn).st_size > 4 * 1024 * 1024):
ratio = image.width / image.height
if oversize and ratio > 1:
image = image.resize((target_side_length, image.height * target_side_length // image.width), LANCZOS)
elif oversize:
image = image.resize((image.width * target_side_length // image.height, target_side_length), LANCZOS)
image.save(fullfn_without_extension + ".jpg", quality=opts.jpeg_quality)
if opts.enable_pnginfo and info is not None:
piexif.insert(exif_bytes(), fullfn_without_extension + ".jpg")
if opts.save_txt and info is not None:
with open(f"{fullfn_without_extension}.txt", "w", encoding="utf8") as file:
file.write(info + "\n")
class Upscaler:
name = "Lanczos"
def do_upscale(self, img):
return img
def upscale(self, img, w, h):
for i in range(3):
if img.width >= w and img.height >= h:
break
img = self.do_upscale(img)
if img.width != w or img.height != h:
img = img.resize((int(w), int(h)), resample=LANCZOS)
return img
class UpscalerNone(Upscaler):
name = "None"
def upscale(self, img, w, h):
return img
modules.shared.sd_upscalers.append(UpscalerNone())
modules.shared.sd_upscalers.append(Upscaler())
<|code_end|>
modules/shared.py
<|code_start|>import sys
import argparse
import json
import os
import gradio as gr
import tqdm
import modules.artists
from modules.paths import script_path, sd_path
from modules.devices import get_optimal_device
import modules.styles
import modules.interrogate
import modules.memmon
import modules.sd_models
sd_model_file = os.path.join(script_path, 'model.ckpt')
default_sd_model_file = sd_model_file
parser = argparse.ArgumentParser()
parser.add_argument("--config", type=str, default=os.path.join(sd_path, "configs/stable-diffusion/v1-inference.yaml"), help="path to config which constructs model",)
parser.add_argument("--ckpt", type=str, default=sd_model_file, help="path to checkpoint of stable diffusion model; this checkpoint will be added to the list of checkpoints and loaded by default if you don't have a checkpoint selected in settings",)
parser.add_argument("--ckpt-dir", type=str, default=os.path.join(script_path, 'models'), help="path to directory with stable diffusion checkpoints",)
parser.add_argument("--gfpgan-dir", type=str, help="GFPGAN directory", default=('./src/gfpgan' if os.path.exists('./src/gfpgan') else './GFPGAN'))
parser.add_argument("--gfpgan-model", type=str, help="GFPGAN model file name", default=None)
parser.add_argument("--no-half", action='store_true', help="do not switch the model to 16-bit floats")
parser.add_argument("--no-progressbar-hiding", action='store_true', help="do not hide progressbar in gradio UI (we hide it because it slows down ML if you have hardware acceleration in browser)")
parser.add_argument("--max-batch-count", type=int, default=16, help="maximum batch count value for the UI")
parser.add_argument("--embeddings-dir", type=str, default=os.path.join(script_path, 'embeddings'), help="embeddings directory for textual inversion (default: embeddings)")
parser.add_argument("--allow-code", action='store_true', help="allow custom script execution from webui")
parser.add_argument("--medvram", action='store_true', help="enable stable diffusion model optimizations for sacrificing a little speed for low VRM usage")
parser.add_argument("--lowvram", action='store_true', help="enable stable diffusion model optimizations for sacrificing a lot of speed for very low VRM usage")
parser.add_argument("--always-batch-cond-uncond", action='store_true', help="disables cond/uncond batching that is enabled to save memory with --medvram or --lowvram")
parser.add_argument("--unload-gfpgan", action='store_true', help="does not do anything.")
parser.add_argument("--precision", type=str, help="evaluate at this precision", choices=["full", "autocast"], default="autocast")
parser.add_argument("--share", action='store_true', help="use share=True for gradio and make the UI accessible through their site (doesn't work for me but you might have better luck)")
parser.add_argument("--esrgan-models-path", type=str, help="path to directory with ESRGAN models", default=os.path.join(script_path, 'ESRGAN'))
parser.add_argument("--swinir-models-path", type=str, help="path to directory with SwinIR models", default=os.path.join(script_path, 'SwinIR'))
parser.add_argument("--opt-split-attention", action='store_true', help="force-enables cross-attention layer optimization. By default, it's on for torch.cuda and off for other torch devices.")
parser.add_argument("--disable-opt-split-attention", action='store_true', help="force-disables cross-attention layer optimization")
parser.add_argument("--opt-split-attention-v1", action='store_true', help="enable older version of split attention optimization that does not consume all the VRAM it can find")
parser.add_argument("--listen", action='store_true', help="launch gradio with 0.0.0.0 as server name, allowing to respond to network requests")
parser.add_argument("--port", type=int, help="launch gradio with given server port, you need root/admin rights for ports < 1024, defaults to 7860 if available", default=None)
parser.add_argument("--show-negative-prompt", action='store_true', help="does not do anything", default=False)
parser.add_argument("--ui-config-file", type=str, help="filename to use for ui configuration", default=os.path.join(script_path, 'ui-config.json'))
parser.add_argument("--hide-ui-dir-config", action='store_true', help="hide directory configuration from webui", default=False)
parser.add_argument("--ui-settings-file", type=str, help="filename to use for ui settings", default=os.path.join(script_path, 'config.json'))
parser.add_argument("--gradio-debug", action='store_true', help="launch gradio with --debug option")
parser.add_argument("--gradio-auth", type=str, help='set gradio authentication like "username:password"; or comma-delimit multiple like "u1:p1,u2:p2,u3:p3"', default=None)
parser.add_argument("--opt-channelslast", action='store_true', help="change memory type for stable diffusion to channels last")
parser.add_argument("--styles-file", type=str, help="filename to use for styles", default=os.path.join(script_path, 'styles.csv'))
parser.add_argument("--autolaunch", action='store_true', help="open the webui URL in the system's default browser upon launch", default=False)
parser.add_argument("--use-textbox-seed", action='store_true', help="use textbox for seeds in UI (no up/down, but possible to input long seeds)", default=False)
cmd_opts = parser.parse_args()
device = get_optimal_device()
batch_cond_uncond = cmd_opts.always_batch_cond_uncond or not (cmd_opts.lowvram or cmd_opts.medvram)
parallel_processing_allowed = not cmd_opts.lowvram and not cmd_opts.medvram
config_filename = cmd_opts.ui_settings_file
class State:
interrupted = False
job = ""
job_no = 0
job_count = 0
sampling_step = 0
sampling_steps = 0
current_latent = None
current_image = None
current_image_sampling_step = 0
def interrupt(self):
self.interrupted = True
def nextjob(self):
self.job_no += 1
self.sampling_step = 0
self.current_image_sampling_step = 0
state = State()
artist_db = modules.artists.ArtistsDatabase(os.path.join(script_path, 'artists.csv'))
styles_filename = cmd_opts.styles_file
prompt_styles = modules.styles.StyleDatabase(styles_filename)
interrogator = modules.interrogate.InterrogateModels("interrogate")
face_restorers = []
modules.sd_models.list_models()
def realesrgan_models_names():
import modules.realesrgan_model
return [x.name for x in modules.realesrgan_model.get_realesrgan_models()]
class OptionInfo:
def __init__(self, default=None, label="", component=None, component_args=None, onchange=None):
self.default = default
self.label = label
self.component = component
self.component_args = component_args
self.onchange = onchange
self.section = None
def options_section(section_identifer, options_dict):
for k, v in options_dict.items():
v.section = section_identifer
return options_dict
hide_dirs = {"visible": not cmd_opts.hide_ui_dir_config}
options_templates = {}
options_templates.update(options_section(('saving-images', "Saving images/grids"), {
"samples_save": OptionInfo(True, "Always save all generated images"),
"samples_format": OptionInfo('png', 'File format for images'),
"samples_filename_pattern": OptionInfo("", "Images filename pattern"),
"grid_save": OptionInfo(True, "Always save all generated image grids"),
"grid_format": OptionInfo('png', 'File format for grids'),
"grid_extended_filename": OptionInfo(False, "Add extended info (seed, prompt) to filename when saving grid"),
"grid_only_if_multiple": OptionInfo(True, "Do not save grids consisting of one picture"),
"n_rows": OptionInfo(-1, "Grid row count; use -1 for autodetect and 0 for it to be same as batch size", gr.Slider, {"minimum": -1, "maximum": 16, "step": 1}),
"enable_pnginfo": OptionInfo(True, "Save text information about generation parameters as chunks to png files"),
"save_txt": OptionInfo(False, "Create a text file next to every image with generation parameters."),
"save_images_before_face_restoration": OptionInfo(False, "Save a copy of image before doing face restoration."),
"jpeg_quality": OptionInfo(80, "Quality for saved jpeg images", gr.Slider, {"minimum": 1, "maximum": 100, "step": 1}),
"export_for_4chan": OptionInfo(True, "If PNG image is larger than 4MB or any dimension is larger than 4000, downscale and save copy as JPG"),
"use_original_name_batch": OptionInfo(False, "Use original name for output filename during batch process in extras tab"),
}))
options_templates.update(options_section(('saving-paths', "Paths for saving"), {
"outdir_samples": OptionInfo("", "Output directory for images; if empty, defaults to three directories below", component_args=hide_dirs),
"outdir_txt2img_samples": OptionInfo("outputs/txt2img-images", 'Output directory for txt2img images', component_args=hide_dirs),
"outdir_img2img_samples": OptionInfo("outputs/img2img-images", 'Output directory for img2img images', component_args=hide_dirs),
"outdir_extras_samples": OptionInfo("outputs/extras-images", 'Output directory for images from extras tab', component_args=hide_dirs),
"outdir_grids": OptionInfo("", "Output directory for grids; if empty, defaults to two directories below", component_args=hide_dirs),
"outdir_txt2img_grids": OptionInfo("outputs/txt2img-grids", 'Output directory for txt2img grids', component_args=hide_dirs),
"outdir_img2img_grids": OptionInfo("outputs/img2img-grids", 'Output directory for img2img grids', component_args=hide_dirs),
"outdir_save": OptionInfo("log/images", "Directory for saving images using the Save button", component_args=hide_dirs),
}))
options_templates.update(options_section(('saving-to-dirs', "Saving to a directory"), {
"save_to_dirs": OptionInfo(False, "Save images to a subdirectory"),
"grid_save_to_dirs": OptionInfo(False, "Save grids to subdirectory"),
"directories_filename_pattern": OptionInfo("", "Directory name pattern"),
"directories_max_prompt_words": OptionInfo(8, "Max prompt words", gr.Slider, {"minimum": 1, "maximum": 20, "step": 1}),
}))
options_templates.update(options_section(('upscaling', "Upscaling"), {
"ESRGAN_tile": OptionInfo(192, "Tile size for ESRGAN upscalers. 0 = no tiling.", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}),
"ESRGAN_tile_overlap": OptionInfo(8, "Tile overlap, in pixels for ESRGAN upscalers. Low values = visible seam.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}),
"realesrgan_enabled_models": OptionInfo(["Real-ESRGAN 4x plus", "Real-ESRGAN 4x plus anime 6B"], "Select which RealESRGAN models to show in the web UI. (Requires restart)", gr.CheckboxGroup, lambda: {"choices": realesrgan_models_names()}),
"SWIN_tile": OptionInfo(192, "Tile size for all SwinIR.", gr.Slider, {"minimum": 16, "maximum": 512, "step": 16}),
"SWIN_tile_overlap": OptionInfo(8, "Tile overlap, in pixels for SwinIR. Low values = visible seam.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}),
"ldsr_steps": OptionInfo(100, "LDSR processing steps. Lower = faster", gr.Slider, {"minimum": 1, "maximum": 200, "step": 1}),
"ldsr_pre_down": OptionInfo(1, "LDSR Pre-process downssample scale. 1 = no down-sampling, 4 = 1/4 scale.", gr.Slider, {"minimum": 1, "maximum": 4, "step": 1}),
"ldsr_post_down": OptionInfo(1, "LDSR Post-process down-sample scale. 1 = no down-sampling, 4 = 1/4 scale.", gr.Slider, {"minimum": 1, "maximum": 4, "step": 1}),
"upscaler_for_img2img": OptionInfo(None, "Upscaler for img2img", gr.Radio, lambda: {"choices": [x.name for x in sd_upscalers]}),
}))
options_templates.update(options_section(('face-restoration', "Face restoration"), {
"face_restoration_model": OptionInfo(None, "Face restoration model", gr.Radio, lambda: {"choices": [x.name() for x in face_restorers]}),
"code_former_weight": OptionInfo(0.5, "CodeFormer weight parameter; 0 = maximum effect; 1 = minimum effect", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}),
"face_restoration_unload": OptionInfo(False, "Move face restoration model from VRAM into RAM after processing"),
"save_selected_only": OptionInfo(False, "When using 'Save' button, only save a single selected image"),
}))
options_templates.update(options_section(('system', "System"), {
"memmon_poll_rate": OptionInfo(8, "VRAM usage polls per second during generation. Set to 0 to disable.", gr.Slider, {"minimum": 0, "maximum": 40, "step": 1}),
"samples_log_stdout": OptionInfo(False, "Always print all generation info to standard output"),
"multiple_tqdm": OptionInfo(True, "Add a second progress bar to the console that shows progress for an entire job. Broken in PyCharm console."),
}))
options_templates.update(options_section(('sd', "Stable Diffusion"), {
"sd_model_checkpoint": OptionInfo(None, "Stable Diffusion checkpoint", gr.Radio, lambda: {"choices": [x.title for x in modules.sd_models.checkpoints_list.values()]}),
"img2img_color_correction": OptionInfo(False, "Apply color correction to img2img results to match original colors."),
"save_images_before_color_correction": OptionInfo(False, "Save a copy of image before applying color correction to img2img results"),
"img2img_fix_steps": OptionInfo(False, "With img2img, do exactly the amount of steps the slider specifies (normally you'd do less with less denoising)."),
"enable_quantization": OptionInfo(False, "Enable quantization in K samplers for sharper and cleaner results. This may change existing seeds. Requires restart to apply."),
"enable_emphasis": OptionInfo(True, "Use (text) to make model pay more attention to text and [text] to make it pay less attention"),
"enable_batch_seeds": OptionInfo(True, "Make K-diffusion samplers produce same images in a batch as when making a single image"),
"filter_nsfw": OptionInfo(False, "Filter NSFW content"),
"random_artist_categories": OptionInfo([], "Allowed categories for random artists selection when using the Roll button", gr.CheckboxGroup, {"choices": artist_db.categories()}),
}))
options_templates.update(options_section(('interrogate', "Interrogate Options"), {
"interrogate_keep_models_in_memory": OptionInfo(False, "Interrogate: keep models in VRAM"),
"interrogate_use_builtin_artists": OptionInfo(True, "Interrogate: use artists from artists.csv"),
"interrogate_clip_num_beams": OptionInfo(1, "Interrogate: num_beams for BLIP", gr.Slider, {"minimum": 1, "maximum": 16, "step": 1}),
"interrogate_clip_min_length": OptionInfo(24, "Interrogate: minimum description length (excluding artists, etc..)", gr.Slider, {"minimum": 1, "maximum": 128, "step": 1}),
"interrogate_clip_max_length": OptionInfo(48, "Interrogate: maximum description length", gr.Slider, {"minimum": 1, "maximum": 256, "step": 1}),
"interrogate_clip_dict_limit": OptionInfo(1500, "Interrogate: maximum number of lines in text file (0 = No limit)"),
}))
options_templates.update(options_section(('ui', "User interface"), {
"show_progressbar": OptionInfo(True, "Show progressbar"),
"show_progress_every_n_steps": OptionInfo(0, "Show show image creation progress every N sampling steps. Set 0 to disable.", gr.Slider, {"minimum": 0, "maximum": 32, "step": 1}),
"return_grid": OptionInfo(True, "Show grid in results for web"),
"add_model_hash_to_info": OptionInfo(True, "Add model hash to generation information"),
"font": OptionInfo("", "Font for image grids that have text"),
"js_modal_lightbox": OptionInfo(True, "Enable full page image viewer"),
"js_modal_lightbox_initialy_zoomed": OptionInfo(True, "Show images zoomed in by default in full page image viewer"),
}))
class Options:
data = None
data_labels = options_templates
typemap = {int: float}
def __init__(self):
self.data = {k: v.default for k, v in self.data_labels.items()}
def __setattr__(self, key, value):
if self.data is not None:
if key in self.data:
self.data[key] = value
return super(Options, self).__setattr__(key, value)
def __getattr__(self, item):
if self.data is not None:
if item in self.data:
return self.data[item]
if item in self.data_labels:
return self.data_labels[item].default
return super(Options, self).__getattribute__(item)
def save(self, filename):
with open(filename, "w", encoding="utf8") as file:
json.dump(self.data, file)
def same_type(self, x, y):
if x is None or y is None:
return True
type_x = self.typemap.get(type(x), type(x))
type_y = self.typemap.get(type(y), type(y))
return type_x == type_y
def load(self, filename):
with open(filename, "r", encoding="utf8") as file:
self.data = json.load(file)
bad_settings = 0
for k, v in self.data.items():
info = self.data_labels.get(k, None)
if info is not None and not self.same_type(info.default, v):
print(f"Warning: bad setting value: {k}: {v} ({type(v).__name__}; expected {type(info.default).__name__})", file=sys.stderr)
bad_settings += 1
if bad_settings > 0:
print(f"The program is likely to not work with bad settings.\nSettings file: {filename}\nEither fix the file, or delete it and restart.", file=sys.stderr)
def onchange(self, key, func):
item = self.data_labels.get(key)
item.onchange = func
def dumpjson(self):
d = {k: self.data.get(k, self.data_labels.get(k).default) for k in self.data_labels.keys()}
return json.dumps(d)
opts = Options()
if os.path.exists(config_filename):
opts.load(config_filename)
sd_upscalers = []
sd_model = None
progress_print_out = sys.stdout
class TotalTQDM:
def __init__(self):
self._tqdm = None
def reset(self):
self._tqdm = tqdm.tqdm(
desc="Total progress",
total=state.job_count * state.sampling_steps,
position=1,
file=progress_print_out
)
def update(self):
if not opts.multiple_tqdm:
return
if self._tqdm is None:
self.reset()
self._tqdm.update()
def updateTotal(self, new_total):
if not opts.multiple_tqdm:
return
if self._tqdm is None:
self.reset()
self._tqdm.total=new_total
def clear(self):
if self._tqdm is not None:
self._tqdm.close()
self._tqdm = None
total_tqdm = TotalTQDM()
mem_mon = modules.memmon.MemUsageMonitor("MemMon", device, opts)
mem_mon.start()
<|code_end|>
webui.py
<|code_start|>import os
import threading
from modules.paths import script_path
import signal
from modules.shared import opts, cmd_opts, state
import modules.shared as shared
import modules.ui
import modules.scripts
import modules.sd_hijack
import modules.codeformer_model
import modules.gfpgan_model
import modules.face_restoration
import modules.realesrgan_model as realesrgan
import modules.esrgan_model as esrgan
import modules.ldsr_model as ldsr
import modules.extras
import modules.lowvram
import modules.txt2img
import modules.img2img
import modules.swinir as swinir
import modules.sd_models
modules.codeformer_model.setup_codeformer()
modules.gfpgan_model.setup_gfpgan()
shared.face_restorers.append(modules.face_restoration.FaceRestoration())
esrgan.load_models(cmd_opts.esrgan_models_path)
swinir.load_models(cmd_opts.swinir_models_path)
realesrgan.setup_realesrgan()
ldsr.add_lsdr()
queue_lock = threading.Lock()
def wrap_queued_call(func):
def f(*args, **kwargs):
with queue_lock:
res = func(*args, **kwargs)
return res
return f
def wrap_gradio_gpu_call(func):
def f(*args, **kwargs):
shared.state.sampling_step = 0
shared.state.job_count = -1
shared.state.job_no = 0
shared.state.current_latent = None
shared.state.current_image = None
shared.state.current_image_sampling_step = 0
shared.state.interrupted = False
with queue_lock:
res = func(*args, **kwargs)
shared.state.job = ""
shared.state.job_count = 0
return res
return modules.ui.wrap_gradio_call(f)
modules.scripts.load_scripts(os.path.join(script_path, "scripts"))
shared.sd_model = modules.sd_models.load_model()
shared.opts.onchange("sd_model_checkpoint", wrap_queued_call(lambda: modules.sd_models.reload_model_weights(shared.sd_model)))
def webui():
# make the program just exit at ctrl+c without waiting for anything
def sigint_handler(sig, frame):
print(f'Interrupted with signal {sig} in {frame}')
os._exit(0)
signal.signal(signal.SIGINT, sigint_handler)
demo = modules.ui.create_ui(
txt2img=wrap_gradio_gpu_call(modules.txt2img.txt2img),
img2img=wrap_gradio_gpu_call(modules.img2img.img2img),
run_extras=wrap_gradio_gpu_call(modules.extras.run_extras),
run_pnginfo=modules.extras.run_pnginfo
)
demo.launch(
share=cmd_opts.share,
server_name="0.0.0.0" if cmd_opts.listen else None,
server_port=cmd_opts.port,
debug=cmd_opts.gradio_debug,
auth=[tuple(cred.split(':')) for cred in cmd_opts.gradio_auth.strip('"').split(',')] if cmd_opts.gradio_auth else None,
inbrowser=cmd_opts.autolaunch,
)
if __name__ == "__main__":
webui()
<|code_end|>
|
modules/images.py
<|code_start|>import datetime
import math
import os
from collections import namedtuple
import re
import numpy as np
import piexif
import piexif.helper
from PIL import Image, ImageFont, ImageDraw, PngImagePlugin
from fonts.ttf import Roboto
import string
import modules.shared
from modules import sd_samplers, shared
from modules.shared import opts, cmd_opts
LANCZOS = (Image.Resampling.LANCZOS if hasattr(Image, 'Resampling') else Image.LANCZOS)
def image_grid(imgs, batch_size=1, rows=None):
if rows is None:
if opts.n_rows > 0:
rows = opts.n_rows
elif opts.n_rows == 0:
rows = batch_size
else:
rows = math.sqrt(len(imgs))
rows = round(rows)
cols = math.ceil(len(imgs) / rows)
w, h = imgs[0].size
grid = Image.new('RGB', size=(cols * w, rows * h), color='black')
for i, img in enumerate(imgs):
grid.paste(img, box=(i % cols * w, i // cols * h))
return grid
Grid = namedtuple("Grid", ["tiles", "tile_w", "tile_h", "image_w", "image_h", "overlap"])
def split_grid(image, tile_w=512, tile_h=512, overlap=64):
w = image.width
h = image.height
non_overlap_width = tile_w - overlap
non_overlap_height = tile_h - overlap
cols = math.ceil((w - overlap) / non_overlap_width)
rows = math.ceil((h - overlap) / non_overlap_height)
dx = (w - tile_w) / (cols-1) if cols > 1 else 0
dy = (h - tile_h) / (rows-1) if rows > 1 else 0
grid = Grid([], tile_w, tile_h, w, h, overlap)
for row in range(rows):
row_images = []
y = int(row * dy)
if y + tile_h >= h:
y = h - tile_h
for col in range(cols):
x = int(col * dx)
if x+tile_w >= w:
x = w - tile_w
tile = image.crop((x, y, x + tile_w, y + tile_h))
row_images.append([x, tile_w, tile])
grid.tiles.append([y, tile_h, row_images])
return grid
def combine_grid(grid):
def make_mask_image(r):
r = r * 255 / grid.overlap
r = r.astype(np.uint8)
return Image.fromarray(r, 'L')
mask_w = make_mask_image(np.arange(grid.overlap, dtype=np.float32).reshape((1, grid.overlap)).repeat(grid.tile_h, axis=0))
mask_h = make_mask_image(np.arange(grid.overlap, dtype=np.float32).reshape((grid.overlap, 1)).repeat(grid.image_w, axis=1))
combined_image = Image.new("RGB", (grid.image_w, grid.image_h))
for y, h, row in grid.tiles:
combined_row = Image.new("RGB", (grid.image_w, h))
for x, w, tile in row:
if x == 0:
combined_row.paste(tile, (0, 0))
continue
combined_row.paste(tile.crop((0, 0, grid.overlap, h)), (x, 0), mask=mask_w)
combined_row.paste(tile.crop((grid.overlap, 0, w, h)), (x + grid.overlap, 0))
if y == 0:
combined_image.paste(combined_row, (0, 0))
continue
combined_image.paste(combined_row.crop((0, 0, combined_row.width, grid.overlap)), (0, y), mask=mask_h)
combined_image.paste(combined_row.crop((0, grid.overlap, combined_row.width, h)), (0, y + grid.overlap))
return combined_image
class GridAnnotation:
def __init__(self, text='', is_active=True):
self.text = text
self.is_active = is_active
self.size = None
def draw_grid_annotations(im, width, height, hor_texts, ver_texts):
def wrap(drawing, text, font, line_length):
lines = ['']
for word in text.split():
line = f'{lines[-1]} {word}'.strip()
if drawing.textlength(line, font=font) <= line_length:
lines[-1] = line
else:
lines.append(word)
return lines
def draw_texts(drawing, draw_x, draw_y, lines):
for i, line in enumerate(lines):
drawing.multiline_text((draw_x, draw_y + line.size[1] / 2), line.text, font=fnt, fill=color_active if line.is_active else color_inactive, anchor="mm", align="center")
if not line.is_active:
drawing.line((draw_x - line.size[0]//2, draw_y + line.size[1]//2, draw_x + line.size[0]//2, draw_y + line.size[1]//2), fill=color_inactive, width=4)
draw_y += line.size[1] + line_spacing
fontsize = (width + height) // 25
line_spacing = fontsize // 2
try:
fnt = ImageFont.truetype(opts.font or Roboto, fontsize)
except Exception:
fnt = ImageFont.truetype(Roboto, fontsize)
color_active = (0, 0, 0)
color_inactive = (153, 153, 153)
pad_left = 0 if sum([sum([len(line.text) for line in lines]) for lines in ver_texts]) == 0 else width * 3 // 4
cols = im.width // width
rows = im.height // height
assert cols == len(hor_texts), f'bad number of horizontal texts: {len(hor_texts)}; must be {cols}'
assert rows == len(ver_texts), f'bad number of vertical texts: {len(ver_texts)}; must be {rows}'
calc_img = Image.new("RGB", (1, 1), "white")
calc_d = ImageDraw.Draw(calc_img)
for texts, allowed_width in zip(hor_texts + ver_texts, [width] * len(hor_texts) + [pad_left] * len(ver_texts)):
items = [] + texts
texts.clear()
for line in items:
wrapped = wrap(calc_d, line.text, fnt, allowed_width)
texts += [GridAnnotation(x, line.is_active) for x in wrapped]
for line in texts:
bbox = calc_d.multiline_textbbox((0, 0), line.text, font=fnt)
line.size = (bbox[2] - bbox[0], bbox[3] - bbox[1])
hor_text_heights = [sum([line.size[1] + line_spacing for line in lines]) - line_spacing for lines in hor_texts]
ver_text_heights = [sum([line.size[1] + line_spacing for line in lines]) - line_spacing * len(lines) for lines in ver_texts]
pad_top = max(hor_text_heights) + line_spacing * 2
result = Image.new("RGB", (im.width + pad_left, im.height + pad_top), "white")
result.paste(im, (pad_left, pad_top))
d = ImageDraw.Draw(result)
for col in range(cols):
x = pad_left + width * col + width / 2
y = pad_top / 2 - hor_text_heights[col] / 2
draw_texts(d, x, y, hor_texts[col])
for row in range(rows):
x = pad_left / 2
y = pad_top + height * row + height / 2 - ver_text_heights[row] / 2
draw_texts(d, x, y, ver_texts[row])
return result
def draw_prompt_matrix(im, width, height, all_prompts):
prompts = all_prompts[1:]
boundary = math.ceil(len(prompts) / 2)
prompts_horiz = prompts[:boundary]
prompts_vert = prompts[boundary:]
hor_texts = [[GridAnnotation(x, is_active=pos & (1 << i) != 0) for i, x in enumerate(prompts_horiz)] for pos in range(1 << len(prompts_horiz))]
ver_texts = [[GridAnnotation(x, is_active=pos & (1 << i) != 0) for i, x in enumerate(prompts_vert)] for pos in range(1 << len(prompts_vert))]
return draw_grid_annotations(im, width, height, hor_texts, ver_texts)
def resize_image(resize_mode, im, width, height):
def resize(im, w, h):
if opts.upscaler_for_img2img is None or opts.upscaler_for_img2img == "None" or im.mode == 'L':
return im.resize((w, h), resample=LANCZOS)
upscaler = [x for x in shared.sd_upscalers if x.name == opts.upscaler_for_img2img][0]
return upscaler.upscale(im, w, h)
if resize_mode == 0:
res = resize(im, width, height)
elif resize_mode == 1:
ratio = width / height
src_ratio = im.width / im.height
src_w = width if ratio > src_ratio else im.width * height // im.height
src_h = height if ratio <= src_ratio else im.height * width // im.width
resized = resize(im, src_w, src_h)
res = Image.new("RGB", (width, height))
res.paste(resized, box=(width // 2 - src_w // 2, height // 2 - src_h // 2))
else:
ratio = width / height
src_ratio = im.width / im.height
src_w = width if ratio < src_ratio else im.width * height // im.height
src_h = height if ratio >= src_ratio else im.height * width // im.width
resized = resize(im, src_w, src_h)
res = Image.new("RGB", (width, height))
res.paste(resized, box=(width // 2 - src_w // 2, height // 2 - src_h // 2))
if ratio < src_ratio:
fill_height = height // 2 - src_h // 2
res.paste(resized.resize((width, fill_height), box=(0, 0, width, 0)), box=(0, 0))
res.paste(resized.resize((width, fill_height), box=(0, resized.height, width, resized.height)), box=(0, fill_height + src_h))
elif ratio > src_ratio:
fill_width = width // 2 - src_w // 2
res.paste(resized.resize((fill_width, height), box=(0, 0, 0, height)), box=(0, 0))
res.paste(resized.resize((fill_width, height), box=(resized.width, 0, resized.width, height)), box=(fill_width + src_w, 0))
return res
invalid_filename_chars = '<>:"/\\|?*\n'
invalid_filename_prefix = ' '
invalid_filename_postfix = ' .'
re_nonletters = re.compile(r'[\s'+string.punctuation+']+')
max_filename_part_length = 128
def sanitize_filename_part(text, replace_spaces=True):
if replace_spaces:
text = text.replace(' ', '_')
text = text.translate({ord(x): '_' for x in invalid_filename_chars})
text = text.lstrip(invalid_filename_prefix)[:max_filename_part_length]
text = text.rstrip(invalid_filename_postfix)
return text
def apply_filename_pattern(x, p, seed, prompt):
max_prompt_words = opts.directories_max_prompt_words
if seed is not None:
x = x.replace("[seed]", str(seed))
if prompt is not None:
x = x.replace("[prompt]", sanitize_filename_part(prompt))
x = x.replace("[prompt_spaces]", sanitize_filename_part(prompt, replace_spaces=False))
if "[prompt_words]" in x:
words = [x for x in re_nonletters.split(prompt or "") if len(x) > 0]
if len(words) == 0:
words = ["empty"]
x = x.replace("[prompt_words]", sanitize_filename_part(" ".join(words[0:max_prompt_words]), replace_spaces=False))
if p is not None:
x = x.replace("[steps]", str(p.steps))
x = x.replace("[cfg]", str(p.cfg_scale))
x = x.replace("[width]", str(p.width))
x = x.replace("[height]", str(p.height))
x = x.replace("[styles]", sanitize_filename_part(", ".join(p.styles), replace_spaces=False))
x = x.replace("[sampler]", sanitize_filename_part(sd_samplers.samplers[p.sampler_index].name, replace_spaces=False))
x = x.replace("[model_hash]", shared.sd_model.sd_model_hash)
x = x.replace("[date]", datetime.date.today().isoformat())
x = x.replace("[job_timestamp]", shared.state.job_timestamp)
if cmd_opts.hide_ui_dir_config:
x = re.sub(r'^[\\/]+|\.{2,}[\\/]+|[\\/]+\.{2,}', '', x)
return x
def get_next_sequence_number(path, basename):
"""
Determines and returns the next sequence number to use when saving an image in the specified directory.
The sequence starts at 0.
"""
result = -1
if basename != '':
basename = basename + "-"
prefix_length = len(basename)
for p in os.listdir(path):
if p.startswith(basename):
l = os.path.splitext(p[prefix_length:])[0].split('-') #splits the filename (removing the basename first if one is defined, so the sequence number is always the first element)
try:
result = max(int(l[0]), result)
except ValueError:
pass
return result + 1
def save_image(image, path, basename, seed=None, prompt=None, extension='png', info=None, short_filename=False, no_prompt=False, grid=False, pnginfo_section_name='parameters', p=None, existing_info=None, forced_filename=None, suffix=""):
if short_filename or prompt is None or seed is None:
file_decoration = ""
elif opts.save_to_dirs:
file_decoration = opts.samples_filename_pattern or "[seed]"
else:
file_decoration = opts.samples_filename_pattern or "[seed]-[prompt_spaces]"
if file_decoration != "":
file_decoration = "-" + file_decoration.lower()
file_decoration = apply_filename_pattern(file_decoration, p, seed, prompt) + suffix
if extension == 'png' and opts.enable_pnginfo and info is not None:
pnginfo = PngImagePlugin.PngInfo()
if existing_info is not None:
for k, v in existing_info.items():
pnginfo.add_text(k, str(v))
pnginfo.add_text(pnginfo_section_name, info)
else:
pnginfo = None
save_to_dirs = (grid and opts.grid_save_to_dirs) or (not grid and opts.save_to_dirs and not no_prompt)
if save_to_dirs:
dirname = apply_filename_pattern(opts.directories_filename_pattern or "[prompt_words]", p, seed, prompt)
path = os.path.join(path, dirname)
os.makedirs(path, exist_ok=True)
if forced_filename is None:
basecount = get_next_sequence_number(path, basename)
fullfn = "a.png"
fullfn_without_extension = "a"
for i in range(500):
fn = f"{basecount+i:05}" if basename == '' else f"{basename}-{basecount+i:04}"
fullfn = os.path.join(path, f"{fn}{file_decoration}.{extension}")
fullfn_without_extension = os.path.join(path, f"{fn}{file_decoration}")
if not os.path.exists(fullfn):
break
else:
fullfn = os.path.join(path, f"{forced_filename}.{extension}")
fullfn_without_extension = os.path.join(path, forced_filename)
def exif_bytes():
return piexif.dump({
"Exif": {
piexif.ExifIFD.UserComment: piexif.helper.UserComment.dump(info or "", encoding="unicode")
},
})
if extension.lower() in ("jpg", "jpeg", "webp"):
image.save(fullfn, quality=opts.jpeg_quality)
if opts.enable_pnginfo and info is not None:
piexif.insert(exif_bytes(), fullfn)
else:
image.save(fullfn, quality=opts.jpeg_quality, pnginfo=pnginfo)
target_side_length = 4000
oversize = image.width > target_side_length or image.height > target_side_length
if opts.export_for_4chan and (oversize or os.stat(fullfn).st_size > 4 * 1024 * 1024):
ratio = image.width / image.height
if oversize and ratio > 1:
image = image.resize((target_side_length, image.height * target_side_length // image.width), LANCZOS)
elif oversize:
image = image.resize((image.width * target_side_length // image.height, target_side_length), LANCZOS)
image.save(fullfn_without_extension + ".jpg", quality=opts.jpeg_quality)
if opts.enable_pnginfo and info is not None:
piexif.insert(exif_bytes(), fullfn_without_extension + ".jpg")
if opts.save_txt and info is not None:
with open(f"{fullfn_without_extension}.txt", "w", encoding="utf8") as file:
file.write(info + "\n")
class Upscaler:
name = "Lanczos"
def do_upscale(self, img):
return img
def upscale(self, img, w, h):
for i in range(3):
if img.width >= w and img.height >= h:
break
img = self.do_upscale(img)
if img.width != w or img.height != h:
img = img.resize((int(w), int(h)), resample=LANCZOS)
return img
class UpscalerNone(Upscaler):
name = "None"
def upscale(self, img, w, h):
return img
modules.shared.sd_upscalers.append(UpscalerNone())
modules.shared.sd_upscalers.append(Upscaler())
<|code_end|>
modules/shared.py
<|code_start|>import sys
import argparse
import json
import os
import gradio as gr
import tqdm
import datetime
import modules.artists
from modules.paths import script_path, sd_path
from modules.devices import get_optimal_device
import modules.styles
import modules.interrogate
import modules.memmon
import modules.sd_models
sd_model_file = os.path.join(script_path, 'model.ckpt')
default_sd_model_file = sd_model_file
parser = argparse.ArgumentParser()
parser.add_argument("--config", type=str, default=os.path.join(sd_path, "configs/stable-diffusion/v1-inference.yaml"), help="path to config which constructs model",)
parser.add_argument("--ckpt", type=str, default=sd_model_file, help="path to checkpoint of stable diffusion model; this checkpoint will be added to the list of checkpoints and loaded by default if you don't have a checkpoint selected in settings",)
parser.add_argument("--ckpt-dir", type=str, default=os.path.join(script_path, 'models'), help="path to directory with stable diffusion checkpoints",)
parser.add_argument("--gfpgan-dir", type=str, help="GFPGAN directory", default=('./src/gfpgan' if os.path.exists('./src/gfpgan') else './GFPGAN'))
parser.add_argument("--gfpgan-model", type=str, help="GFPGAN model file name", default=None)
parser.add_argument("--no-half", action='store_true', help="do not switch the model to 16-bit floats")
parser.add_argument("--no-progressbar-hiding", action='store_true', help="do not hide progressbar in gradio UI (we hide it because it slows down ML if you have hardware acceleration in browser)")
parser.add_argument("--max-batch-count", type=int, default=16, help="maximum batch count value for the UI")
parser.add_argument("--embeddings-dir", type=str, default=os.path.join(script_path, 'embeddings'), help="embeddings directory for textual inversion (default: embeddings)")
parser.add_argument("--allow-code", action='store_true', help="allow custom script execution from webui")
parser.add_argument("--medvram", action='store_true', help="enable stable diffusion model optimizations for sacrificing a little speed for low VRM usage")
parser.add_argument("--lowvram", action='store_true', help="enable stable diffusion model optimizations for sacrificing a lot of speed for very low VRM usage")
parser.add_argument("--always-batch-cond-uncond", action='store_true', help="disables cond/uncond batching that is enabled to save memory with --medvram or --lowvram")
parser.add_argument("--unload-gfpgan", action='store_true', help="does not do anything.")
parser.add_argument("--precision", type=str, help="evaluate at this precision", choices=["full", "autocast"], default="autocast")
parser.add_argument("--share", action='store_true', help="use share=True for gradio and make the UI accessible through their site (doesn't work for me but you might have better luck)")
parser.add_argument("--esrgan-models-path", type=str, help="path to directory with ESRGAN models", default=os.path.join(script_path, 'ESRGAN'))
parser.add_argument("--swinir-models-path", type=str, help="path to directory with SwinIR models", default=os.path.join(script_path, 'SwinIR'))
parser.add_argument("--opt-split-attention", action='store_true', help="force-enables cross-attention layer optimization. By default, it's on for torch.cuda and off for other torch devices.")
parser.add_argument("--disable-opt-split-attention", action='store_true', help="force-disables cross-attention layer optimization")
parser.add_argument("--opt-split-attention-v1", action='store_true', help="enable older version of split attention optimization that does not consume all the VRAM it can find")
parser.add_argument("--listen", action='store_true', help="launch gradio with 0.0.0.0 as server name, allowing to respond to network requests")
parser.add_argument("--port", type=int, help="launch gradio with given server port, you need root/admin rights for ports < 1024, defaults to 7860 if available", default=None)
parser.add_argument("--show-negative-prompt", action='store_true', help="does not do anything", default=False)
parser.add_argument("--ui-config-file", type=str, help="filename to use for ui configuration", default=os.path.join(script_path, 'ui-config.json'))
parser.add_argument("--hide-ui-dir-config", action='store_true', help="hide directory configuration from webui", default=False)
parser.add_argument("--ui-settings-file", type=str, help="filename to use for ui settings", default=os.path.join(script_path, 'config.json'))
parser.add_argument("--gradio-debug", action='store_true', help="launch gradio with --debug option")
parser.add_argument("--gradio-auth", type=str, help='set gradio authentication like "username:password"; or comma-delimit multiple like "u1:p1,u2:p2,u3:p3"', default=None)
parser.add_argument("--opt-channelslast", action='store_true', help="change memory type for stable diffusion to channels last")
parser.add_argument("--styles-file", type=str, help="filename to use for styles", default=os.path.join(script_path, 'styles.csv'))
parser.add_argument("--autolaunch", action='store_true', help="open the webui URL in the system's default browser upon launch", default=False)
parser.add_argument("--use-textbox-seed", action='store_true', help="use textbox for seeds in UI (no up/down, but possible to input long seeds)", default=False)
cmd_opts = parser.parse_args()
device = get_optimal_device()
batch_cond_uncond = cmd_opts.always_batch_cond_uncond or not (cmd_opts.lowvram or cmd_opts.medvram)
parallel_processing_allowed = not cmd_opts.lowvram and not cmd_opts.medvram
config_filename = cmd_opts.ui_settings_file
class State:
interrupted = False
job = ""
job_no = 0
job_count = 0
job_timestamp = 0
sampling_step = 0
sampling_steps = 0
current_latent = None
current_image = None
current_image_sampling_step = 0
def interrupt(self):
self.interrupted = True
def nextjob(self):
self.job_no += 1
self.sampling_step = 0
self.current_image_sampling_step = 0
def get_job_timestamp(self):
return datetime.datetime.now().strftime("%Y%m%d%H%M%S")
state = State()
artist_db = modules.artists.ArtistsDatabase(os.path.join(script_path, 'artists.csv'))
styles_filename = cmd_opts.styles_file
prompt_styles = modules.styles.StyleDatabase(styles_filename)
interrogator = modules.interrogate.InterrogateModels("interrogate")
face_restorers = []
modules.sd_models.list_models()
def realesrgan_models_names():
import modules.realesrgan_model
return [x.name for x in modules.realesrgan_model.get_realesrgan_models()]
class OptionInfo:
def __init__(self, default=None, label="", component=None, component_args=None, onchange=None):
self.default = default
self.label = label
self.component = component
self.component_args = component_args
self.onchange = onchange
self.section = None
def options_section(section_identifer, options_dict):
for k, v in options_dict.items():
v.section = section_identifer
return options_dict
hide_dirs = {"visible": not cmd_opts.hide_ui_dir_config}
options_templates = {}
options_templates.update(options_section(('saving-images', "Saving images/grids"), {
"samples_save": OptionInfo(True, "Always save all generated images"),
"samples_format": OptionInfo('png', 'File format for images'),
"samples_filename_pattern": OptionInfo("", "Images filename pattern"),
"grid_save": OptionInfo(True, "Always save all generated image grids"),
"grid_format": OptionInfo('png', 'File format for grids'),
"grid_extended_filename": OptionInfo(False, "Add extended info (seed, prompt) to filename when saving grid"),
"grid_only_if_multiple": OptionInfo(True, "Do not save grids consisting of one picture"),
"n_rows": OptionInfo(-1, "Grid row count; use -1 for autodetect and 0 for it to be same as batch size", gr.Slider, {"minimum": -1, "maximum": 16, "step": 1}),
"enable_pnginfo": OptionInfo(True, "Save text information about generation parameters as chunks to png files"),
"save_txt": OptionInfo(False, "Create a text file next to every image with generation parameters."),
"save_images_before_face_restoration": OptionInfo(False, "Save a copy of image before doing face restoration."),
"jpeg_quality": OptionInfo(80, "Quality for saved jpeg images", gr.Slider, {"minimum": 1, "maximum": 100, "step": 1}),
"export_for_4chan": OptionInfo(True, "If PNG image is larger than 4MB or any dimension is larger than 4000, downscale and save copy as JPG"),
"use_original_name_batch": OptionInfo(False, "Use original name for output filename during batch process in extras tab"),
}))
options_templates.update(options_section(('saving-paths', "Paths for saving"), {
"outdir_samples": OptionInfo("", "Output directory for images; if empty, defaults to three directories below", component_args=hide_dirs),
"outdir_txt2img_samples": OptionInfo("outputs/txt2img-images", 'Output directory for txt2img images', component_args=hide_dirs),
"outdir_img2img_samples": OptionInfo("outputs/img2img-images", 'Output directory for img2img images', component_args=hide_dirs),
"outdir_extras_samples": OptionInfo("outputs/extras-images", 'Output directory for images from extras tab', component_args=hide_dirs),
"outdir_grids": OptionInfo("", "Output directory for grids; if empty, defaults to two directories below", component_args=hide_dirs),
"outdir_txt2img_grids": OptionInfo("outputs/txt2img-grids", 'Output directory for txt2img grids', component_args=hide_dirs),
"outdir_img2img_grids": OptionInfo("outputs/img2img-grids", 'Output directory for img2img grids', component_args=hide_dirs),
"outdir_save": OptionInfo("log/images", "Directory for saving images using the Save button", component_args=hide_dirs),
}))
options_templates.update(options_section(('saving-to-dirs', "Saving to a directory"), {
"save_to_dirs": OptionInfo(False, "Save images to a subdirectory"),
"grid_save_to_dirs": OptionInfo(False, "Save grids to subdirectory"),
"directories_filename_pattern": OptionInfo("", "Directory name pattern"),
"directories_max_prompt_words": OptionInfo(8, "Max prompt words", gr.Slider, {"minimum": 1, "maximum": 20, "step": 1}),
}))
options_templates.update(options_section(('upscaling', "Upscaling"), {
"ESRGAN_tile": OptionInfo(192, "Tile size for ESRGAN upscalers. 0 = no tiling.", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}),
"ESRGAN_tile_overlap": OptionInfo(8, "Tile overlap, in pixels for ESRGAN upscalers. Low values = visible seam.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}),
"realesrgan_enabled_models": OptionInfo(["Real-ESRGAN 4x plus", "Real-ESRGAN 4x plus anime 6B"], "Select which RealESRGAN models to show in the web UI. (Requires restart)", gr.CheckboxGroup, lambda: {"choices": realesrgan_models_names()}),
"SWIN_tile": OptionInfo(192, "Tile size for all SwinIR.", gr.Slider, {"minimum": 16, "maximum": 512, "step": 16}),
"SWIN_tile_overlap": OptionInfo(8, "Tile overlap, in pixels for SwinIR. Low values = visible seam.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}),
"ldsr_steps": OptionInfo(100, "LDSR processing steps. Lower = faster", gr.Slider, {"minimum": 1, "maximum": 200, "step": 1}),
"ldsr_pre_down": OptionInfo(1, "LDSR Pre-process downssample scale. 1 = no down-sampling, 4 = 1/4 scale.", gr.Slider, {"minimum": 1, "maximum": 4, "step": 1}),
"ldsr_post_down": OptionInfo(1, "LDSR Post-process down-sample scale. 1 = no down-sampling, 4 = 1/4 scale.", gr.Slider, {"minimum": 1, "maximum": 4, "step": 1}),
"upscaler_for_img2img": OptionInfo(None, "Upscaler for img2img", gr.Radio, lambda: {"choices": [x.name for x in sd_upscalers]}),
}))
options_templates.update(options_section(('face-restoration', "Face restoration"), {
"face_restoration_model": OptionInfo(None, "Face restoration model", gr.Radio, lambda: {"choices": [x.name() for x in face_restorers]}),
"code_former_weight": OptionInfo(0.5, "CodeFormer weight parameter; 0 = maximum effect; 1 = minimum effect", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}),
"face_restoration_unload": OptionInfo(False, "Move face restoration model from VRAM into RAM after processing"),
"save_selected_only": OptionInfo(False, "When using 'Save' button, only save a single selected image"),
}))
options_templates.update(options_section(('system', "System"), {
"memmon_poll_rate": OptionInfo(8, "VRAM usage polls per second during generation. Set to 0 to disable.", gr.Slider, {"minimum": 0, "maximum": 40, "step": 1}),
"samples_log_stdout": OptionInfo(False, "Always print all generation info to standard output"),
"multiple_tqdm": OptionInfo(True, "Add a second progress bar to the console that shows progress for an entire job. Broken in PyCharm console."),
}))
options_templates.update(options_section(('sd', "Stable Diffusion"), {
"sd_model_checkpoint": OptionInfo(None, "Stable Diffusion checkpoint", gr.Radio, lambda: {"choices": [x.title for x in modules.sd_models.checkpoints_list.values()]}),
"img2img_color_correction": OptionInfo(False, "Apply color correction to img2img results to match original colors."),
"save_images_before_color_correction": OptionInfo(False, "Save a copy of image before applying color correction to img2img results"),
"img2img_fix_steps": OptionInfo(False, "With img2img, do exactly the amount of steps the slider specifies (normally you'd do less with less denoising)."),
"enable_quantization": OptionInfo(False, "Enable quantization in K samplers for sharper and cleaner results. This may change existing seeds. Requires restart to apply."),
"enable_emphasis": OptionInfo(True, "Use (text) to make model pay more attention to text and [text] to make it pay less attention"),
"enable_batch_seeds": OptionInfo(True, "Make K-diffusion samplers produce same images in a batch as when making a single image"),
"filter_nsfw": OptionInfo(False, "Filter NSFW content"),
"random_artist_categories": OptionInfo([], "Allowed categories for random artists selection when using the Roll button", gr.CheckboxGroup, {"choices": artist_db.categories()}),
}))
options_templates.update(options_section(('interrogate', "Interrogate Options"), {
"interrogate_keep_models_in_memory": OptionInfo(False, "Interrogate: keep models in VRAM"),
"interrogate_use_builtin_artists": OptionInfo(True, "Interrogate: use artists from artists.csv"),
"interrogate_clip_num_beams": OptionInfo(1, "Interrogate: num_beams for BLIP", gr.Slider, {"minimum": 1, "maximum": 16, "step": 1}),
"interrogate_clip_min_length": OptionInfo(24, "Interrogate: minimum description length (excluding artists, etc..)", gr.Slider, {"minimum": 1, "maximum": 128, "step": 1}),
"interrogate_clip_max_length": OptionInfo(48, "Interrogate: maximum description length", gr.Slider, {"minimum": 1, "maximum": 256, "step": 1}),
"interrogate_clip_dict_limit": OptionInfo(1500, "Interrogate: maximum number of lines in text file (0 = No limit)"),
}))
options_templates.update(options_section(('ui', "User interface"), {
"show_progressbar": OptionInfo(True, "Show progressbar"),
"show_progress_every_n_steps": OptionInfo(0, "Show show image creation progress every N sampling steps. Set 0 to disable.", gr.Slider, {"minimum": 0, "maximum": 32, "step": 1}),
"return_grid": OptionInfo(True, "Show grid in results for web"),
"add_model_hash_to_info": OptionInfo(True, "Add model hash to generation information"),
"font": OptionInfo("", "Font for image grids that have text"),
"js_modal_lightbox": OptionInfo(True, "Enable full page image viewer"),
"js_modal_lightbox_initialy_zoomed": OptionInfo(True, "Show images zoomed in by default in full page image viewer"),
}))
class Options:
data = None
data_labels = options_templates
typemap = {int: float}
def __init__(self):
self.data = {k: v.default for k, v in self.data_labels.items()}
def __setattr__(self, key, value):
if self.data is not None:
if key in self.data:
self.data[key] = value
return super(Options, self).__setattr__(key, value)
def __getattr__(self, item):
if self.data is not None:
if item in self.data:
return self.data[item]
if item in self.data_labels:
return self.data_labels[item].default
return super(Options, self).__getattribute__(item)
def save(self, filename):
with open(filename, "w", encoding="utf8") as file:
json.dump(self.data, file)
def same_type(self, x, y):
if x is None or y is None:
return True
type_x = self.typemap.get(type(x), type(x))
type_y = self.typemap.get(type(y), type(y))
return type_x == type_y
def load(self, filename):
with open(filename, "r", encoding="utf8") as file:
self.data = json.load(file)
bad_settings = 0
for k, v in self.data.items():
info = self.data_labels.get(k, None)
if info is not None and not self.same_type(info.default, v):
print(f"Warning: bad setting value: {k}: {v} ({type(v).__name__}; expected {type(info.default).__name__})", file=sys.stderr)
bad_settings += 1
if bad_settings > 0:
print(f"The program is likely to not work with bad settings.\nSettings file: {filename}\nEither fix the file, or delete it and restart.", file=sys.stderr)
def onchange(self, key, func):
item = self.data_labels.get(key)
item.onchange = func
def dumpjson(self):
d = {k: self.data.get(k, self.data_labels.get(k).default) for k in self.data_labels.keys()}
return json.dumps(d)
opts = Options()
if os.path.exists(config_filename):
opts.load(config_filename)
sd_upscalers = []
sd_model = None
progress_print_out = sys.stdout
class TotalTQDM:
def __init__(self):
self._tqdm = None
def reset(self):
self._tqdm = tqdm.tqdm(
desc="Total progress",
total=state.job_count * state.sampling_steps,
position=1,
file=progress_print_out
)
def update(self):
if not opts.multiple_tqdm:
return
if self._tqdm is None:
self.reset()
self._tqdm.update()
def updateTotal(self, new_total):
if not opts.multiple_tqdm:
return
if self._tqdm is None:
self.reset()
self._tqdm.total=new_total
def clear(self):
if self._tqdm is not None:
self._tqdm.close()
self._tqdm = None
total_tqdm = TotalTQDM()
mem_mon = modules.memmon.MemUsageMonitor("MemMon", device, opts)
mem_mon.start()
<|code_end|>
webui.py
<|code_start|>import os
import threading
from modules.paths import script_path
import signal
from modules.shared import opts, cmd_opts, state
import modules.shared as shared
import modules.ui
import modules.scripts
import modules.sd_hijack
import modules.codeformer_model
import modules.gfpgan_model
import modules.face_restoration
import modules.realesrgan_model as realesrgan
import modules.esrgan_model as esrgan
import modules.ldsr_model as ldsr
import modules.extras
import modules.lowvram
import modules.txt2img
import modules.img2img
import modules.swinir as swinir
import modules.sd_models
modules.codeformer_model.setup_codeformer()
modules.gfpgan_model.setup_gfpgan()
shared.face_restorers.append(modules.face_restoration.FaceRestoration())
esrgan.load_models(cmd_opts.esrgan_models_path)
swinir.load_models(cmd_opts.swinir_models_path)
realesrgan.setup_realesrgan()
ldsr.add_lsdr()
queue_lock = threading.Lock()
def wrap_queued_call(func):
def f(*args, **kwargs):
with queue_lock:
res = func(*args, **kwargs)
return res
return f
def wrap_gradio_gpu_call(func):
def f(*args, **kwargs):
shared.state.sampling_step = 0
shared.state.job_count = -1
shared.state.job_no = 0
shared.state.job_timestamp = shared.state.get_job_timestamp()
shared.state.current_latent = None
shared.state.current_image = None
shared.state.current_image_sampling_step = 0
shared.state.interrupted = False
with queue_lock:
res = func(*args, **kwargs)
shared.state.job = ""
shared.state.job_count = 0
return res
return modules.ui.wrap_gradio_call(f)
modules.scripts.load_scripts(os.path.join(script_path, "scripts"))
shared.sd_model = modules.sd_models.load_model()
shared.opts.onchange("sd_model_checkpoint", wrap_queued_call(lambda: modules.sd_models.reload_model_weights(shared.sd_model)))
def webui():
# make the program just exit at ctrl+c without waiting for anything
def sigint_handler(sig, frame):
print(f'Interrupted with signal {sig} in {frame}')
os._exit(0)
signal.signal(signal.SIGINT, sigint_handler)
demo = modules.ui.create_ui(
txt2img=wrap_gradio_gpu_call(modules.txt2img.txt2img),
img2img=wrap_gradio_gpu_call(modules.img2img.img2img),
run_extras=wrap_gradio_gpu_call(modules.extras.run_extras),
run_pnginfo=modules.extras.run_pnginfo
)
demo.launch(
share=cmd_opts.share,
server_name="0.0.0.0" if cmd_opts.listen else None,
server_port=cmd_opts.port,
debug=cmd_opts.gradio_debug,
auth=[tuple(cred.split(':')) for cred in cmd_opts.gradio_auth.strip('"').split(',')] if cmd_opts.gradio_auth else None,
inbrowser=cmd_opts.autolaunch,
)
if __name__ == "__main__":
webui()
<|code_end|>
|
Img2Img alt should divide by sigma[-1], not std
https://github.com/AUTOMATIC1111/stable-diffusion-webui/blob/53651696dc1492f3b7cc009d64a55532bc787aa7/scripts/img2imgalt.py#L59
While the noise used in stable diffusion is generated from a normal distribution, that doesn't mean that it is always perfectly normal. Normalizing the reverse-generated noise results in incorrect saturation in the output image.
I've been able to solve the saturation issue by dividing by the first sigma instead. I spent some time verifying the img2imgalt technique by attempting to re-derive it, and dividing by the first sigma appears to be the correct approach.
See my contribution to another repo here:
code: https://github.com/sd-webui/stable-diffusion-webui/blob/17748cbc9c34df44d0381c42e4f0fe1903089438/scripts/sd_utils.py#L525
original pr: https://github.com/sd-webui/stable-diffusion-webui/pull/1070/files#diff-2e278c1b9a8c0e308b8272729de19c973ac24710b3467dfb9c877db5d1cf7a3f
Edit:
The changes by MartinCairnsSQL below are _also_ required. They're from the original author's gist.
https://github.com/AUTOMATIC1111/stable-diffusion-webui/issues/736#issuecomment-1252538516
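For clarity, the whole proposal reduces to how `find_noise_for_image` scales the reconstructed latent before returning it. The snippet below is only an illustrative sketch of that difference (the tensor shape and the `sigma_max` value are made-up stand-ins for the script's `x` and `sigmas[-1]`, not taken from the model):
```python
# Illustrative sketch only, not the actual patch: it just contrasts the two scalings.
# `sigma_max` is a made-up stand-in for sigmas[-1] of the flipped schedule used in
# find_noise_for_image; the tensor is a stand-in for the reverse-generated latent x.
import torch

x = torch.randn(1, 4, 64, 64) * 14.0   # pretend reconstructed noise; std is close to, but not exactly, 14
sigma_max = 14.6146                     # illustrative value for the largest sigma of the schedule

normalized = x / x.std()                # current behaviour: forces the std to exactly 1
rescaled = x / sigma_max                # proposed: scale by the schedule's sigma instead

print(normalized.std().item(), rescaled.std().item())
```
Normalizing forces the standard deviation to exactly 1, while scaling by the schedule's sigma keeps the small natural deviation of the reconstructed noise, which is the saturation difference described above.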
|
scripts/img2imgalt.py
<|code_start|>from collections import namedtuple
import numpy as np
from tqdm import trange
import modules.scripts as scripts
import gradio as gr
from modules import processing, shared, sd_samplers, prompt_parser
from modules.processing import Processed
from modules.sd_samplers import samplers
from modules.shared import opts, cmd_opts, state
import torch
import k_diffusion as K
from PIL import Image
from torch import autocast
from einops import rearrange, repeat
def find_noise_for_image(p, cond, uncond, cfg_scale, steps):
x = p.init_latent
s_in = x.new_ones([x.shape[0]])
dnw = K.external.CompVisDenoiser(shared.sd_model)
sigmas = dnw.get_sigmas(steps).flip(0)
shared.state.sampling_steps = steps
for i in trange(1, len(sigmas)):
shared.state.sampling_step += 1
x_in = torch.cat([x] * 2)
sigma_in = torch.cat([sigmas[i] * s_in] * 2)
cond_in = torch.cat([uncond, cond])
c_out, c_in = [K.utils.append_dims(k, x_in.ndim) for k in dnw.get_scalings(sigma_in)]
t = dnw.sigma_to_t(sigma_in)
eps = shared.sd_model.apply_model(x_in * c_in, t, cond=cond_in)
denoised_uncond, denoised_cond = (x_in + eps * c_out).chunk(2)
denoised = denoised_uncond + (denoised_cond - denoised_uncond) * cfg_scale
d = (x - denoised) / sigmas[i]
dt = sigmas[i] - sigmas[i - 1]
x = x + d * dt
sd_samplers.store_latent(x)
# This shouldn't be necessary, but solved some VRAM issues
del x_in, sigma_in, cond_in, c_out, c_in, t,
del eps, denoised_uncond, denoised_cond, denoised, d, dt
shared.state.nextjob()
return x / x.std()
Cached = namedtuple("Cached", ["noise", "cfg_scale", "steps", "latent", "original_prompt", "original_negative_prompt"])
class Script(scripts.Script):
def __init__(self):
self.cache = None
def title(self):
return "img2img alternative test"
def show(self, is_img2img):
return is_img2img
def ui(self, is_img2img):
original_prompt = gr.Textbox(label="Original prompt", lines=1)
original_negative_prompt = gr.Textbox(label="Original negative prompt", lines=1)
cfg = gr.Slider(label="Decode CFG scale", minimum=0.0, maximum=15.0, step=0.1, value=1.0)
st = gr.Slider(label="Decode steps", minimum=1, maximum=150, step=1, value=50)
randomness = gr.Slider(label="Randomness", minimum=0.0, maximum=1.0, step=0.01, value=0.0)
return [original_prompt, original_negative_prompt, cfg, st, randomness]
def run(self, p, original_prompt, original_negative_prompt, cfg, st, randomness):
p.batch_size = 1
p.batch_count = 1
def sample_extra(conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength):
lat = (p.init_latent.cpu().numpy() * 10).astype(int)
same_params = self.cache is not None and self.cache.cfg_scale == cfg and self.cache.steps == st and self.cache.original_prompt == original_prompt and self.cache.original_negative_prompt == original_negative_prompt
same_everything = same_params and self.cache.latent.shape == lat.shape and np.abs(self.cache.latent-lat).sum() < 100
if same_everything:
rec_noise = self.cache.noise
else:
shared.state.job_count += 1
cond = p.sd_model.get_learned_conditioning(p.batch_size * [original_prompt])
uncond = p.sd_model.get_learned_conditioning(p.batch_size * [original_negative_prompt])
rec_noise = find_noise_for_image(p, cond, uncond, cfg, st)
self.cache = Cached(rec_noise, cfg, st, lat, original_prompt, original_negative_prompt)
rand_noise = processing.create_random_tensors(p.init_latent.shape[1:], [p.seed + x + 1 for x in range(p.init_latent.shape[0])])
combined_noise = ((1 - randomness) * rec_noise + randomness * rand_noise) / ((randomness**2 + (1-randomness)**2) ** 0.5)
sampler = samplers[p.sampler_index].constructor(p.sd_model)
sigmas = sampler.model_wrap.get_sigmas(p.steps)
noise_dt = combined_noise - (p.init_latent / sigmas[0])
p.seed = p.seed + 1
return sampler.sample_img2img(p, p.init_latent, noise_dt, conditioning, unconditional_conditioning)
p.sample = sample_extra
p.extra_generation_params["Decode prompt"] = original_prompt
p.extra_generation_params["Decode negative prompt"] = original_negative_prompt
p.extra_generation_params["Decode CFG scale"] = cfg
p.extra_generation_params["Decode steps"] = st
p.extra_generation_params["Randomness"] = randomness
processed = processing.process_images(p)
return processed
<|code_end|>
|
scripts/img2imgalt.py
<|code_start|>from collections import namedtuple
import numpy as np
from tqdm import trange
import modules.scripts as scripts
import gradio as gr
from modules import processing, shared, sd_samplers, prompt_parser
from modules.processing import Processed
from modules.sd_samplers import samplers
from modules.shared import opts, cmd_opts, state
import torch
import k_diffusion as K
from PIL import Image
from torch import autocast
from einops import rearrange, repeat
def find_noise_for_image(p, cond, uncond, cfg_scale, steps):
x = p.init_latent
s_in = x.new_ones([x.shape[0]])
dnw = K.external.CompVisDenoiser(shared.sd_model)
sigmas = dnw.get_sigmas(steps).flip(0)
shared.state.sampling_steps = steps
for i in trange(1, len(sigmas)):
shared.state.sampling_step += 1
x_in = torch.cat([x] * 2)
sigma_in = torch.cat([sigmas[i] * s_in] * 2)
cond_in = torch.cat([uncond, cond])
c_out, c_in = [K.utils.append_dims(k, x_in.ndim) for k in dnw.get_scalings(sigma_in)]
t = dnw.sigma_to_t(sigma_in)
eps = shared.sd_model.apply_model(x_in * c_in, t, cond=cond_in)
denoised_uncond, denoised_cond = (x_in + eps * c_out).chunk(2)
denoised = denoised_uncond + (denoised_cond - denoised_uncond) * cfg_scale
d = (x - denoised) / sigmas[i]
dt = sigmas[i] - sigmas[i - 1]
x = x + d * dt
sd_samplers.store_latent(x)
# This shouldn't be necessary, but solved some VRAM issues
del x_in, sigma_in, cond_in, c_out, c_in, t,
del eps, denoised_uncond, denoised_cond, denoised, d, dt
shared.state.nextjob()
return x / x.std()
Cached = namedtuple("Cached", ["noise", "cfg_scale", "steps", "latent", "original_prompt", "original_negative_prompt", "sigma_adjustment"])
# Based on changes suggested by briansemrau in https://github.com/AUTOMATIC1111/stable-diffusion-webui/issues/736
def find_noise_for_image_sigma_adjustment(p, cond, uncond, cfg_scale, steps):
x = p.init_latent
s_in = x.new_ones([x.shape[0]])
dnw = K.external.CompVisDenoiser(shared.sd_model)
sigmas = dnw.get_sigmas(steps).flip(0)
shared.state.sampling_steps = steps
for i in trange(1, len(sigmas)):
shared.state.sampling_step += 1
x_in = torch.cat([x] * 2)
sigma_in = torch.cat([sigmas[i - 1] * s_in] * 2)
cond_in = torch.cat([uncond, cond])
c_out, c_in = [K.utils.append_dims(k, x_in.ndim) for k in dnw.get_scalings(sigma_in)]
if i == 1:
t = dnw.sigma_to_t(torch.cat([sigmas[i] * s_in] * 2))
else:
t = dnw.sigma_to_t(sigma_in)
eps = shared.sd_model.apply_model(x_in * c_in, t, cond=cond_in)
denoised_uncond, denoised_cond = (x_in + eps * c_out).chunk(2)
denoised = denoised_uncond + (denoised_cond - denoised_uncond) * cfg_scale
if i == 1:
d = (x - denoised) / (2 * sigmas[i])
else:
d = (x - denoised) / sigmas[i - 1]
dt = sigmas[i] - sigmas[i - 1]
x = x + d * dt
sd_samplers.store_latent(x)
# This shouldn't be necessary, but solved some VRAM issues
del x_in, sigma_in, cond_in, c_out, c_in, t,
del eps, denoised_uncond, denoised_cond, denoised, d, dt
shared.state.nextjob()
return x / sigmas[-1]
class Script(scripts.Script):
def __init__(self):
self.cache = None
def title(self):
return "img2img alternative test"
def show(self, is_img2img):
return is_img2img
def ui(self, is_img2img):
original_prompt = gr.Textbox(label="Original prompt", lines=1)
original_negative_prompt = gr.Textbox(label="Original negative prompt", lines=1)
cfg = gr.Slider(label="Decode CFG scale", minimum=0.0, maximum=15.0, step=0.1, value=1.0)
st = gr.Slider(label="Decode steps", minimum=1, maximum=150, step=1, value=50)
randomness = gr.Slider(label="Randomness", minimum=0.0, maximum=1.0, step=0.01, value=0.0)
sigma_adjustment = gr.Checkbox(label="Sigma adjustment for finding noise for image", value=False)
return [original_prompt, original_negative_prompt, cfg, st, randomness, sigma_adjustment]
def run(self, p, original_prompt, original_negative_prompt, cfg, st, randomness, sigma_adjustment):
p.batch_size = 1
p.batch_count = 1
def sample_extra(conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength):
lat = (p.init_latent.cpu().numpy() * 10).astype(int)
same_params = self.cache is not None and self.cache.cfg_scale == cfg and self.cache.steps == st \
and self.cache.original_prompt == original_prompt \
and self.cache.original_negative_prompt == original_negative_prompt \
and self.cache.sigma_adjustment == sigma_adjustment
same_everything = same_params and self.cache.latent.shape == lat.shape and np.abs(self.cache.latent-lat).sum() < 100
if same_everything:
rec_noise = self.cache.noise
else:
shared.state.job_count += 1
cond = p.sd_model.get_learned_conditioning(p.batch_size * [original_prompt])
uncond = p.sd_model.get_learned_conditioning(p.batch_size * [original_negative_prompt])
if sigma_adjustment:
rec_noise = find_noise_for_image_sigma_adjustment(p, cond, uncond, cfg, st)
else:
rec_noise = find_noise_for_image(p, cond, uncond, cfg, st)
self.cache = Cached(rec_noise, cfg, st, lat, original_prompt, original_negative_prompt, sigma_adjustment)
rand_noise = processing.create_random_tensors(p.init_latent.shape[1:], [p.seed + x + 1 for x in range(p.init_latent.shape[0])])
combined_noise = ((1 - randomness) * rec_noise + randomness * rand_noise) / ((randomness**2 + (1-randomness)**2) ** 0.5)
sampler = samplers[p.sampler_index].constructor(p.sd_model)
sigmas = sampler.model_wrap.get_sigmas(p.steps)
noise_dt = combined_noise - (p.init_latent / sigmas[0])
p.seed = p.seed + 1
return sampler.sample_img2img(p, p.init_latent, noise_dt, conditioning, unconditional_conditioning)
p.sample = sample_extra
p.extra_generation_params["Decode prompt"] = original_prompt
p.extra_generation_params["Decode negative prompt"] = original_negative_prompt
p.extra_generation_params["Decode CFG scale"] = cfg
p.extra_generation_params["Decode steps"] = st
p.extra_generation_params["Randomness"] = randomness
p.extra_generation_params["Sigma Adjustment"] = sigma_adjustment
processed = processing.process_images(p)
return processed
<|code_end|>
|
Option for sound effect/notification upon job completion
Problem:
I often alt-tab and work on other things while the images are generating, and by the time I notice that the job is done, half an hour has usually passed.
Solution request:
Option for sound effect/notification upon job completion.
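(Purely as an illustration of the request, not a proposed patch: any job-running function could be wrapped so that a sound fires when it returns, e.g. a terminal bell on the server side; a real implementation would more likely notify through the browser.)
```python
# Rough sketch only -- not the implementation used by this repository.
import sys

def notify_done():
    sys.stdout.write('\a')   # terminal bell; replace with any sound/notification mechanism
    sys.stdout.flush()

def with_completion_notice(func):
    """Wrap a job function so notify_done() runs after it finishes."""
    def wrapped(*args, **kwargs):
        result = func(*args, **kwargs)
        notify_done()
        return result
    return wrapped

# usage (hypothetical): txt2img = with_completion_notice(txt2img)
```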
|
modules/ui.py
<|code_start|>import base64
import html
import io
import json
import math
import mimetypes
import os
import random
import sys
import time
import traceback
import numpy as np
import torch
from PIL import Image
import gradio as gr
import gradio.utils
import gradio.routes
from modules.paths import script_path
from modules.shared import opts, cmd_opts
import modules.shared as shared
from modules.sd_samplers import samplers, samplers_for_img2img
import modules.ldsr_model
import modules.scripts
import modules.gfpgan_model
import modules.codeformer_model
import modules.styles
import modules.generation_parameters_copypaste
# this is a fix for Windows users. Without it, javascript files will be served with text/html content-type and the browser will not show any UI
mimetypes.init()
mimetypes.add_type('application/javascript', '.js')
if not cmd_opts.share and not cmd_opts.listen:
# fix gradio phoning home
gradio.utils.version_check = lambda: None
gradio.utils.get_local_ip_address = lambda: '127.0.0.1'
def gr_show(visible=True):
return {"visible": visible, "__type__": "update"}
sample_img2img = "assets/stable-samples/img2img/sketch-mountains-input.jpg"
sample_img2img = sample_img2img if os.path.exists(sample_img2img) else None
css_hide_progressbar = """
.wrap .m-12 svg { display:none!important; }
.progress-bar { display:none!important; }
.meta-text { display:none!important; }
"""
# Using constants for these since the variation selector isn't visible.
# Important that they exactly match script.js for tooltip to work.
random_symbol = '\U0001f3b2\ufe0f' # 🎲️
reuse_symbol = '\u267b\ufe0f' # ♻️
art_symbol = '\U0001f3a8' # 🎨
paste_symbol = '\u2199\ufe0f' # ↙
def plaintext_to_html(text):
text = "<p>" + "<br>\n".join([f"{html.escape(x)}" for x in text.split('\n')]) + "</p>"
return text
def image_from_url_text(filedata):
if type(filedata) == list:
if len(filedata) == 0:
return None
filedata = filedata[0]
if filedata.startswith("data:image/png;base64,"):
filedata = filedata[len("data:image/png;base64,"):]
filedata = base64.decodebytes(filedata.encode('utf-8'))
image = Image.open(io.BytesIO(filedata))
return image
def send_gradio_gallery_to_image(x):
if len(x) == 0:
return None
return image_from_url_text(x[0])
def save_files(js_data, images, index):
import csv
os.makedirs(opts.outdir_save, exist_ok=True)
filenames = []
data = json.loads(js_data)
if index > -1 and opts.save_selected_only and (index > 0 or not opts.return_grid): # ensures we are looking at a specific non-grid picture, and we have save_selected_only
images = [images[index]]
data["seed"] += (index - 1 if opts.return_grid else index)
with open(os.path.join(opts.outdir_save, "log.csv"), "a", encoding="utf8", newline='') as file:
at_start = file.tell() == 0
writer = csv.writer(file)
if at_start:
writer.writerow(["prompt", "seed", "width", "height", "sampler", "cfgs", "steps", "filename", "negative_prompt"])
filename_base = str(int(time.time() * 1000))
for i, filedata in enumerate(images):
filename = filename_base + ("" if len(images) == 1 else "-" + str(i + 1)) + ".png"
filepath = os.path.join(opts.outdir_save, filename)
if filedata.startswith("data:image/png;base64,"):
filedata = filedata[len("data:image/png;base64,"):]
with open(filepath, "wb") as imgfile:
imgfile.write(base64.decodebytes(filedata.encode('utf-8')))
filenames.append(filename)
writer.writerow([data["prompt"], data["seed"], data["width"], data["height"], data["sampler"], data["cfg_scale"], data["steps"], filenames[0], data["negative_prompt"]])
return '', '', plaintext_to_html(f"Saved: {filenames[0]}")
def wrap_gradio_call(func):
def f(*args, **kwargs):
run_memmon = opts.memmon_poll_rate > 0 and not shared.mem_mon.disabled
if run_memmon:
shared.mem_mon.monitor()
t = time.perf_counter()
try:
res = list(func(*args, **kwargs))
except Exception as e:
print("Error completing request", file=sys.stderr)
print("Arguments:", args, kwargs, file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
shared.state.job = ""
shared.state.job_count = 0
res = [None, '', f"<div class='error'>{plaintext_to_html(type(e).__name__+': '+str(e))}</div>"]
elapsed = time.perf_counter() - t
if run_memmon:
mem_stats = {k: -(v//-(1024*1024)) for k, v in shared.mem_mon.stop().items()}
active_peak = mem_stats['active_peak']
reserved_peak = mem_stats['reserved_peak']
sys_peak = mem_stats['system_peak']
sys_total = mem_stats['total']
sys_pct = round(sys_peak/max(sys_total, 1) * 100, 2)
vram_html = f"<p class='vram'>Torch active/reserved: {active_peak}/{reserved_peak} MiB, <wbr>Sys VRAM: {sys_peak}/{sys_total} MiB ({sys_pct}%)</p>"
else:
vram_html = ''
# last item is always HTML
res[-1] += f"<div class='performance'><p class='time'>Time taken: <wbr>{elapsed:.2f}s</p>{vram_html}</div>"
shared.state.interrupted = False
return tuple(res)
return f
def check_progress_call(id_part):
if shared.state.job_count == 0:
return "", gr_show(False), gr_show(False)
progress = 0
if shared.state.job_count > 0:
progress += shared.state.job_no / shared.state.job_count
if shared.state.sampling_steps > 0:
progress += 1 / shared.state.job_count * shared.state.sampling_step / shared.state.sampling_steps
progress = min(progress, 1)
progressbar = ""
if opts.show_progressbar:
progressbar = f"""<div class='progressDiv'><div class='progress' style="width:{progress * 100}%">{str(int(progress*100))+"%" if progress > 0.01 else ""}</div></div>"""
image = gr_show(False)
preview_visibility = gr_show(False)
if opts.show_progress_every_n_steps > 0:
if shared.parallel_processing_allowed:
if shared.state.sampling_step - shared.state.current_image_sampling_step >= opts.show_progress_every_n_steps and shared.state.current_latent is not None:
shared.state.current_image = modules.sd_samplers.sample_to_image(shared.state.current_latent)
shared.state.current_image_sampling_step = shared.state.sampling_step
image = shared.state.current_image
if image is None:
image = gr.update(value=None)
else:
preview_visibility = gr_show(True)
return f"<span id='{id_part}_progress_span' style='display: none'>{time.time()}</span><p>{progressbar}</p>", preview_visibility, image
def check_progress_call_initial(id_part):
shared.state.job_count = -1
shared.state.current_latent = None
shared.state.current_image = None
return check_progress_call(id_part)
def roll_artist(prompt):
allowed_cats = set([x for x in shared.artist_db.categories() if len(opts.random_artist_categories)==0 or x in opts.random_artist_categories])
artist = random.choice([x for x in shared.artist_db.artists if x.category in allowed_cats])
return prompt + ", " + artist.name if prompt != '' else artist.name
def visit(x, func, path=""):
if hasattr(x, 'children'):
for c in x.children:
visit(c, func, path)
elif x.label is not None:
func(path + "/" + str(x.label), x)
def add_style(name: str, prompt: str, negative_prompt: str):
if name is None:
return [gr_show(), gr_show()]
style = modules.styles.PromptStyle(name, prompt, negative_prompt)
shared.prompt_styles.styles[style.name] = style
# Save all loaded prompt styles: this allows us to update the storage format in the future more easily, because we
# reserialize all styles every time we save them
shared.prompt_styles.save_styles(shared.styles_filename)
return [gr.Dropdown.update(visible=True, choices=list(shared.prompt_styles.styles)) for _ in range(4)]
def apply_styles(prompt, prompt_neg, style1_name, style2_name):
prompt = shared.prompt_styles.apply_styles_to_prompt(prompt, [style1_name, style2_name])
prompt_neg = shared.prompt_styles.apply_negative_styles_to_prompt(prompt_neg, [style1_name, style2_name])
return [gr.Textbox.update(value=prompt), gr.Textbox.update(value=prompt_neg), gr.Dropdown.update(value="None"), gr.Dropdown.update(value="None")]
def interrogate(image):
prompt = shared.interrogator.interrogate(image)
return gr_show(True) if prompt is None else prompt
def create_seed_inputs():
with gr.Row():
with gr.Box():
with gr.Row(elem_id='seed_row'):
seed = (gr.Textbox if cmd_opts.use_textbox_seed else gr.Number)(label='Seed', value=-1)
seed.style(container=False)
random_seed = gr.Button(random_symbol, elem_id='random_seed')
reuse_seed = gr.Button(reuse_symbol, elem_id='reuse_seed')
with gr.Box(elem_id='subseed_show_box'):
seed_checkbox = gr.Checkbox(label='Extra', elem_id='subseed_show', value=False)
# Components to show/hide based on the 'Extra' checkbox
seed_extras = []
with gr.Row(visible=False) as seed_extra_row_1:
seed_extras.append(seed_extra_row_1)
with gr.Box():
with gr.Row(elem_id='subseed_row'):
subseed = gr.Number(label='Variation seed', value=-1)
subseed.style(container=False)
random_subseed = gr.Button(random_symbol, elem_id='random_subseed')
reuse_subseed = gr.Button(reuse_symbol, elem_id='reuse_subseed')
subseed_strength = gr.Slider(label='Variation strength', value=0.0, minimum=0, maximum=1, step=0.01)
with gr.Row(visible=False) as seed_extra_row_2:
seed_extras.append(seed_extra_row_2)
seed_resize_from_w = gr.Slider(minimum=0, maximum=2048, step=64, label="Resize seed from width", value=0)
seed_resize_from_h = gr.Slider(minimum=0, maximum=2048, step=64, label="Resize seed from height", value=0)
random_seed.click(fn=lambda: -1, show_progress=False, inputs=[], outputs=[seed])
random_subseed.click(fn=lambda: -1, show_progress=False, inputs=[], outputs=[subseed])
def change_visibility(show):
return {comp: gr_show(show) for comp in seed_extras}
seed_checkbox.change(change_visibility, show_progress=False, inputs=[seed_checkbox], outputs=seed_extras)
return seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox
def connect_reuse_seed(seed: gr.Number, reuse_seed: gr.Button, generation_info: gr.Textbox, dummy_component, is_subseed):
""" Connects a 'reuse (sub)seed' button's click event so that it copies last used
(sub)seed value from generation info to the seed field. If copying subseed and subseed strength
was 0, i.e. no variation seed was used, it copies the normal seed value instead."""
def copy_seed(gen_info_string: str, index):
res = -1
try:
gen_info = json.loads(gen_info_string)
index -= gen_info.get('index_of_first_image', 0)
if is_subseed and gen_info.get('subseed_strength', 0) > 0:
all_subseeds = gen_info.get('all_subseeds', [-1])
res = all_subseeds[index if 0 <= index < len(all_subseeds) else 0]
else:
all_seeds = gen_info.get('all_seeds', [-1])
res = all_seeds[index if 0 <= index < len(all_seeds) else 0]
except json.decoder.JSONDecodeError as e:
if gen_info_string != '':
print("Error parsing JSON generation info:", file=sys.stderr)
print(gen_info_string, file=sys.stderr)
return [res, gr_show(False)]
reuse_seed.click(
fn=copy_seed,
_js="(x, y) => [x, selected_gallery_index()]",
show_progress=False,
inputs=[generation_info, dummy_component],
outputs=[seed, dummy_component]
)
def create_toprow(is_img2img):
id_part = "img2img" if is_img2img else "txt2img"
with gr.Row(elem_id="toprow"):
with gr.Column(scale=4):
with gr.Row():
with gr.Column(scale=80):
with gr.Row():
prompt = gr.Textbox(label="Prompt", elem_id="prompt", show_label=False, placeholder="Prompt", lines=2)
with gr.Column(scale=1, elem_id="roll_col"):
roll = gr.Button(value=art_symbol, elem_id="roll", visible=len(shared.artist_db.artists) > 0)
paste = gr.Button(value=paste_symbol, elem_id="paste")
with gr.Column(scale=10, elem_id="style_pos_col"):
prompt_style = gr.Dropdown(label="Style 1", elem_id=f"{id_part}_style_index", choices=[k for k, v in shared.prompt_styles.styles.items()], value=next(iter(shared.prompt_styles.styles.keys())), visible=len(shared.prompt_styles.styles) > 1)
with gr.Row():
with gr.Column(scale=8):
negative_prompt = gr.Textbox(label="Negative prompt", elem_id="negative_prompt", show_label=False, placeholder="Negative prompt", lines=2)
with gr.Column(scale=1, elem_id="style_neg_col"):
prompt_style2 = gr.Dropdown(label="Style 2", elem_id=f"{id_part}_style2_index", choices=[k for k, v in shared.prompt_styles.styles.items()], value=next(iter(shared.prompt_styles.styles.keys())), visible=len(shared.prompt_styles.styles) > 1)
with gr.Column(scale=1):
with gr.Row():
interrupt = gr.Button('Interrupt', elem_id=f"{id_part}_interrupt")
submit = gr.Button('Generate', elem_id="generate", variant='primary')
interrupt.click(
fn=lambda: shared.state.interrupt(),
inputs=[],
outputs=[],
)
with gr.Row():
if is_img2img:
interrogate = gr.Button('Interrogate', elem_id="interrogate")
else:
interrogate = None
prompt_style_apply = gr.Button('Apply style', elem_id="style_apply")
save_style = gr.Button('Create style', elem_id="style_create")
return prompt, roll, prompt_style, negative_prompt, prompt_style2, submit, interrogate, prompt_style_apply, save_style, paste
def setup_progressbar(progressbar, preview, id_part):
check_progress = gr.Button('Check progress', elem_id=f"{id_part}_check_progress", visible=False)
check_progress.click(
fn=lambda: check_progress_call(id_part),
show_progress=False,
inputs=[],
outputs=[progressbar, preview, preview],
)
check_progress_initial = gr.Button('Check progress (first)', elem_id=f"{id_part}_check_progress_initial", visible=False)
check_progress_initial.click(
fn=lambda: check_progress_call_initial(id_part),
show_progress=False,
inputs=[],
outputs=[progressbar, preview, preview],
)
def create_ui(txt2img, img2img, run_extras, run_pnginfo):
with gr.Blocks(analytics_enabled=False) as txt2img_interface:
txt2img_prompt, roll, txt2img_prompt_style, txt2img_negative_prompt, txt2img_prompt_style2, submit, _, txt2img_prompt_style_apply, txt2img_save_style, paste = create_toprow(is_img2img=False)
dummy_component = gr.Label(visible=False)
with gr.Row(elem_id='txt2img_progress_row'):
with gr.Column(scale=1):
pass
with gr.Column(scale=1):
progressbar = gr.HTML(elem_id="txt2img_progressbar")
txt2img_preview = gr.Image(elem_id='txt2img_preview', visible=False)
setup_progressbar(progressbar, txt2img_preview, 'txt2img')
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
steps = gr.Slider(minimum=1, maximum=150, step=1, label="Sampling Steps", value=20)
sampler_index = gr.Radio(label='Sampling method', elem_id="txt2img_sampling", choices=[x.name for x in samplers], value=samplers[0].name, type="index")
with gr.Group():
width = gr.Slider(minimum=64, maximum=2048, step=64, label="Width", value=512)
height = gr.Slider(minimum=64, maximum=2048, step=64, label="Height", value=512)
with gr.Row():
restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1)
tiling = gr.Checkbox(label='Tiling', value=False)
enable_hr = gr.Checkbox(label='Highres. fix', value=False)
with gr.Row(visible=False) as hr_options:
scale_latent = gr.Checkbox(label='Scale latent', value=False)
denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7)
with gr.Row():
batch_count = gr.Slider(minimum=1, maximum=cmd_opts.max_batch_count, step=1, label='Batch count', value=1)
batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1)
cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0)
seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs()
with gr.Group():
custom_inputs = modules.scripts.scripts_txt2img.setup_ui(is_img2img=False)
with gr.Column(variant='panel'):
with gr.Group():
txt2img_preview = gr.Image(elem_id='txt2img_preview', visible=False)
txt2img_gallery = gr.Gallery(label='Output', show_label=False, elem_id='txt2img_gallery').style(grid=4)
with gr.Group():
with gr.Row():
save = gr.Button('Save')
send_to_img2img = gr.Button('Send to img2img')
send_to_inpaint = gr.Button('Send to inpaint')
send_to_extras = gr.Button('Send to extras')
with gr.Group():
html_info = gr.HTML()
generation_info = gr.Textbox(visible=False)
connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False)
connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True)
txt2img_args = dict(
fn=txt2img,
_js="submit",
inputs=[
txt2img_prompt,
txt2img_negative_prompt,
txt2img_prompt_style,
txt2img_prompt_style2,
steps,
sampler_index,
restore_faces,
tiling,
batch_count,
batch_size,
cfg_scale,
seed,
subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox,
height,
width,
enable_hr,
scale_latent,
denoising_strength,
] + custom_inputs,
outputs=[
txt2img_gallery,
generation_info,
html_info
],
show_progress=False,
)
txt2img_prompt.submit(**txt2img_args)
submit.click(**txt2img_args)
enable_hr.change(
fn=lambda x: gr_show(x),
inputs=[enable_hr],
outputs=[hr_options],
)
save.click(
fn=wrap_gradio_call(save_files),
_js="(x, y, z) => [x, y, selected_gallery_index()]",
inputs=[
generation_info,
txt2img_gallery,
html_info,
],
outputs=[
html_info,
html_info,
html_info,
]
)
roll.click(
fn=roll_artist,
inputs=[
txt2img_prompt,
],
outputs=[
txt2img_prompt,
]
)
txt2img_paste_fields = [
(txt2img_prompt, "Prompt"),
(txt2img_negative_prompt, "Negative prompt"),
(steps, "Steps"),
(sampler_index, "Sampler"),
(restore_faces, "Face restoration"),
(cfg_scale, "CFG scale"),
(seed, "Seed"),
(width, "Size-1"),
(height, "Size-2"),
(batch_size, "Batch size"),
(subseed, "Variation seed"),
(subseed_strength, "Variation seed strength"),
(seed_resize_from_w, "Seed resize from-1"),
(seed_resize_from_h, "Seed resize from-2"),
(denoising_strength, "Denoising strength"),
(enable_hr, lambda d: "Denoising strength" in d),
(hr_options, lambda d: gr.Row.update(visible="Denoising strength" in d)),
]
modules.generation_parameters_copypaste.connect_paste(paste, txt2img_paste_fields, txt2img_prompt)
with gr.Blocks(analytics_enabled=False) as img2img_interface:
img2img_prompt, roll, img2img_prompt_style, img2img_negative_prompt, img2img_prompt_style2, submit, img2img_interrogate, img2img_prompt_style_apply, img2img_save_style, paste = create_toprow(is_img2img=True)
with gr.Row(elem_id='img2img_progress_row'):
with gr.Column(scale=1):
pass
with gr.Column(scale=1):
progressbar = gr.HTML(elem_id="img2img_progressbar")
img2img_preview = gr.Image(elem_id='img2img_preview', visible=False)
setup_progressbar(progressbar, img2img_preview, 'img2img')
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
with gr.Tabs(elem_id="mode_img2img") as tabs_img2img_mode:
with gr.TabItem('img2img', id='img2img'):
init_img = gr.Image(label="Image for img2img", show_label=False, source="upload", interactive=True, type="pil")
with gr.TabItem('Inpaint', id='inpaint'):
init_img_with_mask = gr.Image(label="Image for inpainting with mask", show_label=False, elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool="sketch", image_mode="RGBA")
init_img_inpaint = gr.Image(label="Image for img2img", show_label=False, source="upload", interactive=True, type="pil", visible=False)
init_mask_inpaint = gr.Image(label="Mask", source="upload", interactive=True, type="pil", visible=False)
mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4)
with gr.Row():
mask_mode = gr.Radio(label="Mask mode", show_label=False, choices=["Draw mask", "Upload mask"], type="index", value="Draw mask")
inpainting_mask_invert = gr.Radio(label='Masking mode', show_label=False, choices=['Inpaint masked', 'Inpaint not masked'], value='Inpaint masked', type="index")
inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='fill', type="index")
with gr.Row():
inpaint_full_res = gr.Checkbox(label='Inpaint at full resolution', value=False)
inpaint_full_res_padding = gr.Slider(label='Inpaint at full resolution padding, pixels', minimum=0, maximum=256, step=4, value=32)
with gr.TabItem('Batch img2img', id='batch'):
hidden = '<br>Disabled when launched with --hide-ui-dir-config.' if shared.cmd_opts.hide_ui_dir_config else ''
gr.HTML(f"<p class=\"text-gray-500\">Process images in a directory on the same machine where the server is running.{hidden}</p>")
img2img_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs)
img2img_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs)
with gr.Row():
resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", show_label=False, choices=["Just resize", "Crop and resize", "Resize and fill"], type="index", value="Just resize")
steps = gr.Slider(minimum=1, maximum=150, step=1, label="Sampling Steps", value=20)
sampler_index = gr.Radio(label='Sampling method', choices=[x.name for x in samplers_for_img2img], value=samplers_for_img2img[0].name, type="index")
with gr.Group():
width = gr.Slider(minimum=64, maximum=2048, step=64, label="Width", value=512)
height = gr.Slider(minimum=64, maximum=2048, step=64, label="Height", value=512)
with gr.Row():
restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1)
tiling = gr.Checkbox(label='Tiling', value=False)
with gr.Row():
batch_count = gr.Slider(minimum=1, maximum=cmd_opts.max_batch_count, step=1, label='Batch count', value=1)
batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1)
with gr.Group():
cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0)
denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75)
seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs()
with gr.Group():
custom_inputs = modules.scripts.scripts_img2img.setup_ui(is_img2img=True)
with gr.Column(variant='panel'):
with gr.Group():
img2img_preview = gr.Image(elem_id='img2img_preview', visible=False)
img2img_gallery = gr.Gallery(label='Output', show_label=False, elem_id='img2img_gallery').style(grid=4)
with gr.Group():
with gr.Row():
save = gr.Button('Save')
img2img_send_to_img2img = gr.Button('Send to img2img')
img2img_send_to_inpaint = gr.Button('Send to inpaint')
img2img_send_to_extras = gr.Button('Send to extras')
with gr.Group():
html_info = gr.HTML()
generation_info = gr.Textbox(visible=False)
connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False)
connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True)
mask_mode.change(
lambda mode, img: {
init_img_with_mask: gr_show(mode == 0),
init_img_inpaint: gr_show(mode == 1),
init_mask_inpaint: gr_show(mode == 1),
},
inputs=[mask_mode, init_img_with_mask],
outputs=[
init_img_with_mask,
init_img_inpaint,
init_mask_inpaint,
],
)
img2img_args = dict(
fn=img2img,
_js="submit_img2img",
inputs=[
dummy_component,
img2img_prompt,
img2img_negative_prompt,
img2img_prompt_style,
img2img_prompt_style2,
init_img,
init_img_with_mask,
init_img_inpaint,
init_mask_inpaint,
mask_mode,
steps,
sampler_index,
mask_blur,
inpainting_fill,
restore_faces,
tiling,
batch_count,
batch_size,
cfg_scale,
denoising_strength,
seed,
subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox,
height,
width,
resize_mode,
inpaint_full_res,
inpaint_full_res_padding,
inpainting_mask_invert,
img2img_batch_input_dir,
img2img_batch_output_dir,
] + custom_inputs,
outputs=[
img2img_gallery,
generation_info,
html_info
],
show_progress=False,
)
img2img_prompt.submit(**img2img_args)
submit.click(**img2img_args)
img2img_interrogate.click(
fn=interrogate,
inputs=[init_img],
outputs=[img2img_prompt],
)
save.click(
fn=wrap_gradio_call(save_files),
_js="(x, y, z) => [x, y, selected_gallery_index()]",
inputs=[
generation_info,
img2img_gallery,
html_info
],
outputs=[
html_info,
html_info,
html_info,
]
)
roll.click(
fn=roll_artist,
inputs=[
img2img_prompt,
],
outputs=[
img2img_prompt,
]
)
prompts = [(txt2img_prompt, txt2img_negative_prompt), (img2img_prompt, img2img_negative_prompt)]
style_dropdowns = [(txt2img_prompt_style, txt2img_prompt_style2), (img2img_prompt_style, img2img_prompt_style2)]
for button, (prompt, negative_prompt) in zip([txt2img_save_style, img2img_save_style], prompts):
button.click(
fn=add_style,
_js="ask_for_style_name",
# Have to pass empty dummy component here, because the JavaScript and Python function have to accept
# the same number of parameters, but we only know the style-name after the JavaScript prompt
inputs=[dummy_component, prompt, negative_prompt],
outputs=[txt2img_prompt_style, img2img_prompt_style, txt2img_prompt_style2, img2img_prompt_style2],
)
for button, (prompt, negative_prompt), (style1, style2) in zip([txt2img_prompt_style_apply, img2img_prompt_style_apply], prompts, style_dropdowns):
button.click(
fn=apply_styles,
inputs=[prompt, negative_prompt, style1, style2],
outputs=[prompt, negative_prompt, style1, style2],
)
img2img_paste_fields = [
(img2img_prompt, "Prompt"),
(img2img_negative_prompt, "Negative prompt"),
(steps, "Steps"),
(sampler_index, "Sampler"),
(restore_faces, "Face restoration"),
(cfg_scale, "CFG scale"),
(seed, "Seed"),
(width, "Size-1"),
(height, "Size-2"),
(batch_size, "Batch size"),
(subseed, "Variation seed"),
(subseed_strength, "Variation seed strength"),
(seed_resize_from_w, "Seed resize from-1"),
(seed_resize_from_h, "Seed resize from-2"),
(denoising_strength, "Denoising strength"),
]
modules.generation_parameters_copypaste.connect_paste(paste, img2img_paste_fields, img2img_prompt)
with gr.Blocks(analytics_enabled=False) as extras_interface:
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
with gr.Tabs(elem_id="mode_extras"):
with gr.TabItem('Single Image'):
extras_image = gr.Image(label="Source", source="upload", interactive=True, type="pil")
with gr.TabItem('Batch Process'):
image_batch = gr.File(label="Batch Process", file_count="multiple", interactive=True, type="file")
upscaling_resize = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Resize", value=2)
with gr.Group():
extras_upscaler_1 = gr.Radio(label='Upscaler 1', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index")
with gr.Group():
extras_upscaler_2 = gr.Radio(label='Upscaler 2', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index")
extras_upscaler_2_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="Upscaler 2 visibility", value=1)
with gr.Group():
gfpgan_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="GFPGAN visibility", value=0, interactive=modules.gfpgan_model.have_gfpgan)
with gr.Group():
codeformer_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="CodeFormer visibility", value=0, interactive=modules.codeformer_model.have_codeformer)
codeformer_weight = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="CodeFormer weight (0 = maximum effect, 1 = minimum effect)", value=0, interactive=modules.codeformer_model.have_codeformer)
submit = gr.Button('Generate', elem_id="extras_generate", variant='primary')
with gr.Column(variant='panel'):
result_images = gr.Gallery(label="Result", show_label=False)
html_info_x = gr.HTML()
html_info = gr.HTML()
extras_send_to_img2img = gr.Button('Send to img2img')
extras_send_to_inpaint = gr.Button('Send to inpaint')
submit.click(
fn=run_extras,
_js="get_extras_tab_index",
inputs=[
dummy_component,
extras_image,
image_batch,
gfpgan_visibility,
codeformer_visibility,
codeformer_weight,
upscaling_resize,
extras_upscaler_1,
extras_upscaler_2,
extras_upscaler_2_visibility,
],
outputs=[
result_images,
html_info_x,
html_info,
]
)
extras_send_to_img2img.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_img2img",
inputs=[result_images],
outputs=[init_img],
)
extras_send_to_inpaint.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_img2img",
inputs=[result_images],
outputs=[init_img_with_mask],
)
with gr.Blocks(analytics_enabled=False) as pnginfo_interface:
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
image = gr.Image(elem_id="pnginfo_image", label="Source", source="upload", interactive=True, type="pil")
with gr.Column(variant='panel'):
html = gr.HTML()
generation_info = gr.Textbox(visible=False)
html2 = gr.HTML()
with gr.Row():
pnginfo_send_to_txt2img = gr.Button('Send to txt2img')
pnginfo_send_to_img2img = gr.Button('Send to img2img')
image.change(
fn=wrap_gradio_call(run_pnginfo),
inputs=[image],
outputs=[html, generation_info, html2],
)
def create_setting_component(key):
def fun():
return opts.data[key] if key in opts.data else opts.data_labels[key].default
info = opts.data_labels[key]
t = type(info.default)
args = info.component_args() if callable(info.component_args) else info.component_args
if info.component is not None:
comp = info.component
elif t == str:
comp = gr.Textbox
elif t == int:
comp = gr.Number
elif t == bool:
comp = gr.Checkbox
else:
raise Exception(f'bad options item type: {str(t)} for key {key}')
return comp(label=info.label, value=fun, **(args or {}))
components = []
def run_settings(*args):
changed = 0
for key, value, comp in zip(opts.data_labels.keys(), args, components):
if not opts.same_type(value, opts.data_labels[key].default):
return f"Bad value for setting {key}: {value}; expecting {type(opts.data_labels[key].default).__name__}"
for key, value, comp in zip(opts.data_labels.keys(), args, components):
comp_args = opts.data_labels[key].component_args
if comp_args and isinstance(comp_args, dict) and comp_args.get('visible') is False:
continue
oldval = opts.data.get(key, None)
opts.data[key] = value
if oldval != value:
if opts.data_labels[key].onchange is not None:
opts.data_labels[key].onchange()
changed += 1
opts.save(shared.config_filename)
return f'{changed} settings changed.', opts.dumpjson()
with gr.Blocks(analytics_enabled=False) as settings_interface:
settings_submit = gr.Button(value="Apply settings", variant='primary')
result = gr.HTML()
settings_cols = 3
items_per_col = int(len(opts.data_labels) * 0.9 / settings_cols)
cols_displayed = 0
items_displayed = 0
previous_section = None
column = None
with gr.Row(elem_id="settings").style(equal_height=False):
for i, (k, item) in enumerate(opts.data_labels.items()):
if previous_section != item.section:
if cols_displayed < settings_cols and (items_displayed >= items_per_col or previous_section is None):
if column is not None:
column.__exit__()
column = gr.Column(variant='panel')
column.__enter__()
items_displayed = 0
cols_displayed += 1
previous_section = item.section
gr.HTML(elem_id="settings_header_text_{}".format(item.section[0]), value='<h1 class="gr-button-lg">{}</h1>'.format(item.section[1]))
components.append(create_setting_component(k))
items_displayed += 1
request_notifications = gr.Button(value='Request browser notifications', elem_id="request_notifications")
request_notifications.click(
fn=lambda: None,
inputs=[],
outputs=[],
_js='function(){}'
)
if column is not None:
column.__exit__()
interfaces = [
(txt2img_interface, "txt2img", "txt2img"),
(img2img_interface, "img2img", "img2img"),
(extras_interface, "Extras", "extras"),
(pnginfo_interface, "PNG Info", "pnginfo"),
(settings_interface, "Settings", "settings"),
]
with open(os.path.join(script_path, "style.css"), "r", encoding="utf8") as file:
css = file.read()
if os.path.exists(os.path.join(script_path, "user.css")):
with open(os.path.join(script_path, "user.css"), "r", encoding="utf8") as file:
usercss = file.read()
css += usercss
if not cmd_opts.no_progressbar_hiding:
css += css_hide_progressbar
with gr.Blocks(css=css, analytics_enabled=False, title="Stable Diffusion") as demo:
with gr.Tabs() as tabs:
for interface, label, ifid in interfaces:
with gr.TabItem(label, id=ifid):
interface.render()
text_settings = gr.Textbox(elem_id="settings_json", value=lambda: opts.dumpjson(), visible=False)
settings_submit.click(
fn=run_settings,
inputs=components,
outputs=[result, text_settings],
)
paste_field_names = ['Prompt', 'Negative prompt', 'Steps', 'Face restoration', 'Seed', 'Size-1', 'Size-2']
txt2img_fields = [field for field,name in txt2img_paste_fields if name in paste_field_names]
img2img_fields = [field for field,name in img2img_paste_fields if name in paste_field_names]
send_to_img2img.click(
fn=lambda img, *args: (image_from_url_text(img),*args),
_js="(gallery, ...args) => [extract_image_from_gallery_img2img(gallery), ...args]",
inputs=[txt2img_gallery] + txt2img_fields,
outputs=[init_img] + img2img_fields,
)
send_to_inpaint.click(
fn=lambda x, *args: (image_from_url_text(x), *args),
_js="(gallery, ...args) => [extract_image_from_gallery_inpaint(gallery), ...args]",
inputs=[txt2img_gallery] + txt2img_fields,
outputs=[init_img_with_mask] + img2img_fields,
)
img2img_send_to_img2img.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_img2img",
inputs=[img2img_gallery],
outputs=[init_img],
)
img2img_send_to_inpaint.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_inpaint",
inputs=[img2img_gallery],
outputs=[init_img_with_mask],
)
send_to_extras.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_extras",
inputs=[txt2img_gallery],
outputs=[extras_image],
)
img2img_send_to_extras.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_extras",
inputs=[img2img_gallery],
outputs=[extras_image],
)
modules.generation_parameters_copypaste.connect_paste(pnginfo_send_to_txt2img, txt2img_paste_fields, generation_info, 'switch_to_txt2img')
modules.generation_parameters_copypaste.connect_paste(pnginfo_send_to_img2img, img2img_paste_fields, generation_info, 'switch_to_img2img_img2img')
ui_config_file = cmd_opts.ui_config_file
ui_settings = {}
settings_count = len(ui_settings)
error_loading = False
try:
if os.path.exists(ui_config_file):
with open(ui_config_file, "r", encoding="utf8") as file:
ui_settings = json.load(file)
except Exception:
error_loading = True
print("Error loading settings:", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
def loadsave(path, x):
def apply_field(obj, field, condition=None):
key = path + "/" + field
if getattr(obj,'custom_script_source',None) is not None:
key = 'customscript/' + obj.custom_script_source + '/' + key
if getattr(obj, 'do_not_save_to_config', False):
return
saved_value = ui_settings.get(key, None)
if saved_value is None:
ui_settings[key] = getattr(obj, field)
elif condition is None or condition(saved_value):
setattr(obj, field, saved_value)
if type(x) in [gr.Slider, gr.Radio, gr.Checkbox, gr.Textbox, gr.Number] and x.visible:
apply_field(x, 'visible')
if type(x) == gr.Slider:
apply_field(x, 'value')
apply_field(x, 'minimum')
apply_field(x, 'maximum')
apply_field(x, 'step')
if type(x) == gr.Radio:
apply_field(x, 'value', lambda val: val in x.choices)
if type(x) == gr.Checkbox:
apply_field(x, 'value')
if type(x) == gr.Textbox:
apply_field(x, 'value')
if type(x) == gr.Number:
apply_field(x, 'value')
visit(txt2img_interface, loadsave, "txt2img")
visit(img2img_interface, loadsave, "img2img")
visit(extras_interface, loadsave, "extras")
if not error_loading and (not os.path.exists(ui_config_file) or settings_count != len(ui_settings)):
with open(ui_config_file, "w", encoding="utf8") as file:
json.dump(ui_settings, file, indent=4)
return demo
with open(os.path.join(script_path, "script.js"), "r", encoding="utf8") as jsfile:
javascript = f'<script>{jsfile.read()}</script>'
jsdir = os.path.join(script_path, "javascript")
for filename in sorted(os.listdir(jsdir)):
with open(os.path.join(jsdir, filename), "r", encoding="utf8") as jsfile:
javascript += f"\n<script>{jsfile.read()}</script>"
def template_response(*args, **kwargs):
res = gradio_routes_templates_response(*args, **kwargs)
res.body = res.body.replace(b'</head>', f'{javascript}</head>'.encode("utf8"))
res.init_headers()
return res
gradio_routes_templates_response = gradio.routes.templates.TemplateResponse
gradio.routes.templates.TemplateResponse = template_response
<|code_end|>
|
modules/ui.py
<|code_start|>import base64
import html
import io
import json
import math
import mimetypes
import os
import random
import sys
import time
import traceback
import numpy as np
import torch
from PIL import Image
import gradio as gr
import gradio.utils
import gradio.routes
from modules.paths import script_path
from modules.shared import opts, cmd_opts
import modules.shared as shared
from modules.sd_samplers import samplers, samplers_for_img2img
import modules.ldsr_model
import modules.scripts
import modules.gfpgan_model
import modules.codeformer_model
import modules.styles
import modules.generation_parameters_copypaste
# this is a fix for Windows users. Without it, javascript files will be served with text/html content-type and the browser will not show any UI
mimetypes.init()
mimetypes.add_type('application/javascript', '.js')
if not cmd_opts.share and not cmd_opts.listen:
# fix gradio phoning home
gradio.utils.version_check = lambda: None
gradio.utils.get_local_ip_address = lambda: '127.0.0.1'
def gr_show(visible=True):
return {"visible": visible, "__type__": "update"}
sample_img2img = "assets/stable-samples/img2img/sketch-mountains-input.jpg"
sample_img2img = sample_img2img if os.path.exists(sample_img2img) else None
css_hide_progressbar = """
.wrap .m-12 svg { display:none!important; }
.progress-bar { display:none!important; }
.meta-text { display:none!important; }
"""
# Using constants for these since the variation selector isn't visible.
# Important that they exactly match script.js for tooltip to work.
random_symbol = '\U0001f3b2\ufe0f' # 🎲️
reuse_symbol = '\u267b\ufe0f' # ♻️
art_symbol = '\U0001f3a8' # 🎨
paste_symbol = '\u2199\ufe0f' # ↙
def plaintext_to_html(text):
text = "<p>" + "<br>\n".join([f"{html.escape(x)}" for x in text.split('\n')]) + "</p>"
return text
def image_from_url_text(filedata):
if type(filedata) == list:
if len(filedata) == 0:
return None
filedata = filedata[0]
if filedata.startswith("data:image/png;base64,"):
filedata = filedata[len("data:image/png;base64,"):]
filedata = base64.decodebytes(filedata.encode('utf-8'))
image = Image.open(io.BytesIO(filedata))
return image
def send_gradio_gallery_to_image(x):
if len(x) == 0:
return None
return image_from_url_text(x[0])
def save_files(js_data, images, index):
import csv
os.makedirs(opts.outdir_save, exist_ok=True)
filenames = []
data = json.loads(js_data)
if index > -1 and opts.save_selected_only and (index > 0 or not opts.return_grid): # ensures we are looking at a specific non-grid picture, and we have save_selected_only
images = [images[index]]
data["seed"] += (index - 1 if opts.return_grid else index)
with open(os.path.join(opts.outdir_save, "log.csv"), "a", encoding="utf8", newline='') as file:
at_start = file.tell() == 0
writer = csv.writer(file)
if at_start:
writer.writerow(["prompt", "seed", "width", "height", "sampler", "cfgs", "steps", "filename", "negative_prompt"])
filename_base = str(int(time.time() * 1000))
for i, filedata in enumerate(images):
filename = filename_base + ("" if len(images) == 1 else "-" + str(i + 1)) + ".png"
filepath = os.path.join(opts.outdir_save, filename)
if filedata.startswith("data:image/png;base64,"):
filedata = filedata[len("data:image/png;base64,"):]
with open(filepath, "wb") as imgfile:
imgfile.write(base64.decodebytes(filedata.encode('utf-8')))
filenames.append(filename)
writer.writerow([data["prompt"], data["seed"], data["width"], data["height"], data["sampler"], data["cfg_scale"], data["steps"], filenames[0], data["negative_prompt"]])
return '', '', plaintext_to_html(f"Saved: {filenames[0]}")
def wrap_gradio_call(func):
def f(*args, **kwargs):
run_memmon = opts.memmon_poll_rate > 0 and not shared.mem_mon.disabled
if run_memmon:
shared.mem_mon.monitor()
t = time.perf_counter()
try:
res = list(func(*args, **kwargs))
except Exception as e:
print("Error completing request", file=sys.stderr)
print("Arguments:", args, kwargs, file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
shared.state.job = ""
shared.state.job_count = 0
res = [None, '', f"<div class='error'>{plaintext_to_html(type(e).__name__+': '+str(e))}</div>"]
elapsed = time.perf_counter() - t
if run_memmon:
mem_stats = {k: -(v//-(1024*1024)) for k, v in shared.mem_mon.stop().items()}
active_peak = mem_stats['active_peak']
reserved_peak = mem_stats['reserved_peak']
sys_peak = mem_stats['system_peak']
sys_total = mem_stats['total']
sys_pct = round(sys_peak/max(sys_total, 1) * 100, 2)
vram_html = f"<p class='vram'>Torch active/reserved: {active_peak}/{reserved_peak} MiB, <wbr>Sys VRAM: {sys_peak}/{sys_total} MiB ({sys_pct}%)</p>"
else:
vram_html = ''
# last item is always HTML
res[-1] += f"<div class='performance'><p class='time'>Time taken: <wbr>{elapsed:.2f}s</p>{vram_html}</div>"
shared.state.interrupted = False
return tuple(res)
return f
def check_progress_call(id_part):
if shared.state.job_count == 0:
return "", gr_show(False), gr_show(False)
progress = 0
if shared.state.job_count > 0:
progress += shared.state.job_no / shared.state.job_count
if shared.state.sampling_steps > 0:
progress += 1 / shared.state.job_count * shared.state.sampling_step / shared.state.sampling_steps
progress = min(progress, 1)
progressbar = ""
if opts.show_progressbar:
progressbar = f"""<div class='progressDiv'><div class='progress' style="width:{progress * 100}%">{str(int(progress*100))+"%" if progress > 0.01 else ""}</div></div>"""
image = gr_show(False)
preview_visibility = gr_show(False)
if opts.show_progress_every_n_steps > 0:
if shared.parallel_processing_allowed:
if shared.state.sampling_step - shared.state.current_image_sampling_step >= opts.show_progress_every_n_steps and shared.state.current_latent is not None:
shared.state.current_image = modules.sd_samplers.sample_to_image(shared.state.current_latent)
shared.state.current_image_sampling_step = shared.state.sampling_step
image = shared.state.current_image
if image is None:
image = gr.update(value=None)
else:
preview_visibility = gr_show(True)
return f"<span id='{id_part}_progress_span' style='display: none'>{time.time()}</span><p>{progressbar}</p>", preview_visibility, image
def check_progress_call_initial(id_part):
shared.state.job_count = -1
shared.state.current_latent = None
shared.state.current_image = None
return check_progress_call(id_part)
def roll_artist(prompt):
allowed_cats = set([x for x in shared.artist_db.categories() if len(opts.random_artist_categories)==0 or x in opts.random_artist_categories])
artist = random.choice([x for x in shared.artist_db.artists if x.category in allowed_cats])
return prompt + ", " + artist.name if prompt != '' else artist.name
def visit(x, func, path=""):
if hasattr(x, 'children'):
for c in x.children:
visit(c, func, path)
elif x.label is not None:
func(path + "/" + str(x.label), x)
def add_style(name: str, prompt: str, negative_prompt: str):
if name is None:
return [gr_show(), gr_show()]
style = modules.styles.PromptStyle(name, prompt, negative_prompt)
shared.prompt_styles.styles[style.name] = style
# Save all loaded prompt styles: this allows us to update the storage format in the future more easily, because we
# reserialize all styles every time we save them
shared.prompt_styles.save_styles(shared.styles_filename)
return [gr.Dropdown.update(visible=True, choices=list(shared.prompt_styles.styles)) for _ in range(4)]
def apply_styles(prompt, prompt_neg, style1_name, style2_name):
prompt = shared.prompt_styles.apply_styles_to_prompt(prompt, [style1_name, style2_name])
prompt_neg = shared.prompt_styles.apply_negative_styles_to_prompt(prompt_neg, [style1_name, style2_name])
return [gr.Textbox.update(value=prompt), gr.Textbox.update(value=prompt_neg), gr.Dropdown.update(value="None"), gr.Dropdown.update(value="None")]
def interrogate(image):
prompt = shared.interrogator.interrogate(image)
return gr_show(True) if prompt is None else prompt
def create_seed_inputs():
with gr.Row():
with gr.Box():
with gr.Row(elem_id='seed_row'):
seed = (gr.Textbox if cmd_opts.use_textbox_seed else gr.Number)(label='Seed', value=-1)
seed.style(container=False)
random_seed = gr.Button(random_symbol, elem_id='random_seed')
reuse_seed = gr.Button(reuse_symbol, elem_id='reuse_seed')
with gr.Box(elem_id='subseed_show_box'):
seed_checkbox = gr.Checkbox(label='Extra', elem_id='subseed_show', value=False)
# Components to show/hide based on the 'Extra' checkbox
seed_extras = []
with gr.Row(visible=False) as seed_extra_row_1:
seed_extras.append(seed_extra_row_1)
with gr.Box():
with gr.Row(elem_id='subseed_row'):
subseed = gr.Number(label='Variation seed', value=-1)
subseed.style(container=False)
random_subseed = gr.Button(random_symbol, elem_id='random_subseed')
reuse_subseed = gr.Button(reuse_symbol, elem_id='reuse_subseed')
subseed_strength = gr.Slider(label='Variation strength', value=0.0, minimum=0, maximum=1, step=0.01)
with gr.Row(visible=False) as seed_extra_row_2:
seed_extras.append(seed_extra_row_2)
seed_resize_from_w = gr.Slider(minimum=0, maximum=2048, step=64, label="Resize seed from width", value=0)
seed_resize_from_h = gr.Slider(minimum=0, maximum=2048, step=64, label="Resize seed from height", value=0)
random_seed.click(fn=lambda: -1, show_progress=False, inputs=[], outputs=[seed])
random_subseed.click(fn=lambda: -1, show_progress=False, inputs=[], outputs=[subseed])
def change_visibility(show):
return {comp: gr_show(show) for comp in seed_extras}
seed_checkbox.change(change_visibility, show_progress=False, inputs=[seed_checkbox], outputs=seed_extras)
return seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox
def connect_reuse_seed(seed: gr.Number, reuse_seed: gr.Button, generation_info: gr.Textbox, dummy_component, is_subseed):
""" Connects a 'reuse (sub)seed' button's click event so that it copies last used
(sub)seed value from generation info to the seed field. If copying subseed and subseed strength
was 0, i.e. no variation seed was used, it copies the normal seed value instead."""
def copy_seed(gen_info_string: str, index):
res = -1
try:
gen_info = json.loads(gen_info_string)
index -= gen_info.get('index_of_first_image', 0)
if is_subseed and gen_info.get('subseed_strength', 0) > 0:
all_subseeds = gen_info.get('all_subseeds', [-1])
res = all_subseeds[index if 0 <= index < len(all_subseeds) else 0]
else:
all_seeds = gen_info.get('all_seeds', [-1])
res = all_seeds[index if 0 <= index < len(all_seeds) else 0]
except json.decoder.JSONDecodeError as e:
if gen_info_string != '':
print("Error parsing JSON generation info:", file=sys.stderr)
print(gen_info_string, file=sys.stderr)
return [res, gr_show(False)]
reuse_seed.click(
fn=copy_seed,
_js="(x, y) => [x, selected_gallery_index()]",
show_progress=False,
inputs=[generation_info, dummy_component],
outputs=[seed, dummy_component]
)
def create_toprow(is_img2img):
id_part = "img2img" if is_img2img else "txt2img"
with gr.Row(elem_id="toprow"):
with gr.Column(scale=4):
with gr.Row():
with gr.Column(scale=80):
with gr.Row():
prompt = gr.Textbox(label="Prompt", elem_id="prompt", show_label=False, placeholder="Prompt", lines=2)
with gr.Column(scale=1, elem_id="roll_col"):
roll = gr.Button(value=art_symbol, elem_id="roll", visible=len(shared.artist_db.artists) > 0)
paste = gr.Button(value=paste_symbol, elem_id="paste")
with gr.Column(scale=10, elem_id="style_pos_col"):
prompt_style = gr.Dropdown(label="Style 1", elem_id=f"{id_part}_style_index", choices=[k for k, v in shared.prompt_styles.styles.items()], value=next(iter(shared.prompt_styles.styles.keys())), visible=len(shared.prompt_styles.styles) > 1)
with gr.Row():
with gr.Column(scale=8):
negative_prompt = gr.Textbox(label="Negative prompt", elem_id="negative_prompt", show_label=False, placeholder="Negative prompt", lines=2)
with gr.Column(scale=1, elem_id="style_neg_col"):
prompt_style2 = gr.Dropdown(label="Style 2", elem_id=f"{id_part}_style2_index", choices=[k for k, v in shared.prompt_styles.styles.items()], value=next(iter(shared.prompt_styles.styles.keys())), visible=len(shared.prompt_styles.styles) > 1)
with gr.Column(scale=1):
with gr.Row():
interrupt = gr.Button('Interrupt', elem_id=f"{id_part}_interrupt")
submit = gr.Button('Generate', elem_id="generate", variant='primary')
interrupt.click(
fn=lambda: shared.state.interrupt(),
inputs=[],
outputs=[],
)
with gr.Row():
if is_img2img:
interrogate = gr.Button('Interrogate', elem_id="interrogate")
else:
interrogate = None
prompt_style_apply = gr.Button('Apply style', elem_id="style_apply")
save_style = gr.Button('Create style', elem_id="style_create")
return prompt, roll, prompt_style, negative_prompt, prompt_style2, submit, interrogate, prompt_style_apply, save_style, paste
def setup_progressbar(progressbar, preview, id_part):
check_progress = gr.Button('Check progress', elem_id=f"{id_part}_check_progress", visible=False)
check_progress.click(
fn=lambda: check_progress_call(id_part),
show_progress=False,
inputs=[],
outputs=[progressbar, preview, preview],
)
check_progress_initial = gr.Button('Check progress (first)', elem_id=f"{id_part}_check_progress_initial", visible=False)
check_progress_initial.click(
fn=lambda: check_progress_call_initial(id_part),
show_progress=False,
inputs=[],
outputs=[progressbar, preview, preview],
)
def create_ui(txt2img, img2img, run_extras, run_pnginfo):
with gr.Blocks(analytics_enabled=False) as txt2img_interface:
txt2img_prompt, roll, txt2img_prompt_style, txt2img_negative_prompt, txt2img_prompt_style2, submit, _, txt2img_prompt_style_apply, txt2img_save_style, paste = create_toprow(is_img2img=False)
dummy_component = gr.Label(visible=False)
with gr.Row(elem_id='txt2img_progress_row'):
with gr.Column(scale=1):
pass
with gr.Column(scale=1):
progressbar = gr.HTML(elem_id="txt2img_progressbar")
txt2img_preview = gr.Image(elem_id='txt2img_preview', visible=False)
setup_progressbar(progressbar, txt2img_preview, 'txt2img')
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
steps = gr.Slider(minimum=1, maximum=150, step=1, label="Sampling Steps", value=20)
sampler_index = gr.Radio(label='Sampling method', elem_id="txt2img_sampling", choices=[x.name for x in samplers], value=samplers[0].name, type="index")
with gr.Group():
width = gr.Slider(minimum=64, maximum=2048, step=64, label="Width", value=512)
height = gr.Slider(minimum=64, maximum=2048, step=64, label="Height", value=512)
with gr.Row():
restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1)
tiling = gr.Checkbox(label='Tiling', value=False)
enable_hr = gr.Checkbox(label='Highres. fix', value=False)
with gr.Row(visible=False) as hr_options:
scale_latent = gr.Checkbox(label='Scale latent', value=False)
denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7)
with gr.Row():
batch_count = gr.Slider(minimum=1, maximum=cmd_opts.max_batch_count, step=1, label='Batch count', value=1)
batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1)
cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0)
seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs()
with gr.Group():
custom_inputs = modules.scripts.scripts_txt2img.setup_ui(is_img2img=False)
with gr.Column(variant='panel'):
with gr.Group():
txt2img_preview = gr.Image(elem_id='txt2img_preview', visible=False)
txt2img_gallery = gr.Gallery(label='Output', show_label=False, elem_id='txt2img_gallery').style(grid=4)
with gr.Group():
with gr.Row():
save = gr.Button('Save')
send_to_img2img = gr.Button('Send to img2img')
send_to_inpaint = gr.Button('Send to inpaint')
send_to_extras = gr.Button('Send to extras')
with gr.Group():
html_info = gr.HTML()
generation_info = gr.Textbox(visible=False)
connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False)
connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True)
txt2img_args = dict(
fn=txt2img,
_js="submit",
inputs=[
txt2img_prompt,
txt2img_negative_prompt,
txt2img_prompt_style,
txt2img_prompt_style2,
steps,
sampler_index,
restore_faces,
tiling,
batch_count,
batch_size,
cfg_scale,
seed,
subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox,
height,
width,
enable_hr,
scale_latent,
denoising_strength,
] + custom_inputs,
outputs=[
txt2img_gallery,
generation_info,
html_info
],
show_progress=False,
)
txt2img_prompt.submit(**txt2img_args)
submit.click(**txt2img_args)
enable_hr.change(
fn=lambda x: gr_show(x),
inputs=[enable_hr],
outputs=[hr_options],
)
save.click(
fn=wrap_gradio_call(save_files),
_js="(x, y, z) => [x, y, selected_gallery_index()]",
inputs=[
generation_info,
txt2img_gallery,
html_info,
],
outputs=[
html_info,
html_info,
html_info,
]
)
roll.click(
fn=roll_artist,
inputs=[
txt2img_prompt,
],
outputs=[
txt2img_prompt,
]
)
txt2img_paste_fields = [
(txt2img_prompt, "Prompt"),
(txt2img_negative_prompt, "Negative prompt"),
(steps, "Steps"),
(sampler_index, "Sampler"),
(restore_faces, "Face restoration"),
(cfg_scale, "CFG scale"),
(seed, "Seed"),
(width, "Size-1"),
(height, "Size-2"),
(batch_size, "Batch size"),
(subseed, "Variation seed"),
(subseed_strength, "Variation seed strength"),
(seed_resize_from_w, "Seed resize from-1"),
(seed_resize_from_h, "Seed resize from-2"),
(denoising_strength, "Denoising strength"),
(enable_hr, lambda d: "Denoising strength" in d),
(hr_options, lambda d: gr.Row.update(visible="Denoising strength" in d)),
]
modules.generation_parameters_copypaste.connect_paste(paste, txt2img_paste_fields, txt2img_prompt)
with gr.Blocks(analytics_enabled=False) as img2img_interface:
img2img_prompt, roll, img2img_prompt_style, img2img_negative_prompt, img2img_prompt_style2, submit, img2img_interrogate, img2img_prompt_style_apply, img2img_save_style, paste = create_toprow(is_img2img=True)
with gr.Row(elem_id='img2img_progress_row'):
with gr.Column(scale=1):
pass
with gr.Column(scale=1):
progressbar = gr.HTML(elem_id="img2img_progressbar")
img2img_preview = gr.Image(elem_id='img2img_preview', visible=False)
setup_progressbar(progressbar, img2img_preview, 'img2img')
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
with gr.Tabs(elem_id="mode_img2img") as tabs_img2img_mode:
with gr.TabItem('img2img', id='img2img'):
init_img = gr.Image(label="Image for img2img", show_label=False, source="upload", interactive=True, type="pil")
with gr.TabItem('Inpaint', id='inpaint'):
init_img_with_mask = gr.Image(label="Image for inpainting with mask", show_label=False, elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool="sketch", image_mode="RGBA")
init_img_inpaint = gr.Image(label="Image for img2img", show_label=False, source="upload", interactive=True, type="pil", visible=False, elem_id="img_inpaint_base")
init_mask_inpaint = gr.Image(label="Mask", source="upload", interactive=True, type="pil", visible=False, elem_id="img_inpaint_mask")
mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4)
with gr.Row():
mask_mode = gr.Radio(label="Mask mode", show_label=False, choices=["Draw mask", "Upload mask"], type="index", value="Draw mask", elem_id="mask_mode")
inpainting_mask_invert = gr.Radio(label='Masking mode', show_label=False, choices=['Inpaint masked', 'Inpaint not masked'], value='Inpaint masked', type="index")
inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='fill', type="index")
with gr.Row():
inpaint_full_res = gr.Checkbox(label='Inpaint at full resolution', value=False)
inpaint_full_res_padding = gr.Slider(label='Inpaint at full resolution padding, pixels', minimum=0, maximum=256, step=4, value=32)
with gr.TabItem('Batch img2img', id='batch'):
hidden = '<br>Disabled when launched with --hide-ui-dir-config.' if shared.cmd_opts.hide_ui_dir_config else ''
gr.HTML(f"<p class=\"text-gray-500\">Process images in a directory on the same machine where the server is running.{hidden}</p>")
img2img_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs)
img2img_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs)
with gr.Row():
resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", show_label=False, choices=["Just resize", "Crop and resize", "Resize and fill"], type="index", value="Just resize")
steps = gr.Slider(minimum=1, maximum=150, step=1, label="Sampling Steps", value=20)
sampler_index = gr.Radio(label='Sampling method', choices=[x.name for x in samplers_for_img2img], value=samplers_for_img2img[0].name, type="index")
with gr.Group():
width = gr.Slider(minimum=64, maximum=2048, step=64, label="Width", value=512)
height = gr.Slider(minimum=64, maximum=2048, step=64, label="Height", value=512)
with gr.Row():
restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1)
tiling = gr.Checkbox(label='Tiling', value=False)
with gr.Row():
batch_count = gr.Slider(minimum=1, maximum=cmd_opts.max_batch_count, step=1, label='Batch count', value=1)
batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1)
with gr.Group():
cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0)
denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75)
seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs()
with gr.Group():
custom_inputs = modules.scripts.scripts_img2img.setup_ui(is_img2img=True)
with gr.Column(variant='panel'):
with gr.Group():
img2img_preview = gr.Image(elem_id='img2img_preview', visible=False)
img2img_gallery = gr.Gallery(label='Output', show_label=False, elem_id='img2img_gallery').style(grid=4)
with gr.Group():
with gr.Row():
save = gr.Button('Save')
img2img_send_to_img2img = gr.Button('Send to img2img')
img2img_send_to_inpaint = gr.Button('Send to inpaint')
img2img_send_to_extras = gr.Button('Send to extras')
with gr.Group():
html_info = gr.HTML()
generation_info = gr.Textbox(visible=False)
connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False)
connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True)
mask_mode.change(
lambda mode, img: {
init_img_with_mask: gr_show(mode == 0),
init_img_inpaint: gr_show(mode == 1),
init_mask_inpaint: gr_show(mode == 1),
},
inputs=[mask_mode, init_img_with_mask],
outputs=[
init_img_with_mask,
init_img_inpaint,
init_mask_inpaint,
],
)
img2img_args = dict(
fn=img2img,
_js="submit_img2img",
inputs=[
dummy_component,
img2img_prompt,
img2img_negative_prompt,
img2img_prompt_style,
img2img_prompt_style2,
init_img,
init_img_with_mask,
init_img_inpaint,
init_mask_inpaint,
mask_mode,
steps,
sampler_index,
mask_blur,
inpainting_fill,
restore_faces,
tiling,
batch_count,
batch_size,
cfg_scale,
denoising_strength,
seed,
subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox,
height,
width,
resize_mode,
inpaint_full_res,
inpaint_full_res_padding,
inpainting_mask_invert,
img2img_batch_input_dir,
img2img_batch_output_dir,
] + custom_inputs,
outputs=[
img2img_gallery,
generation_info,
html_info
],
show_progress=False,
)
img2img_prompt.submit(**img2img_args)
submit.click(**img2img_args)
img2img_interrogate.click(
fn=interrogate,
inputs=[init_img],
outputs=[img2img_prompt],
)
save.click(
fn=wrap_gradio_call(save_files),
_js="(x, y, z) => [x, y, selected_gallery_index()]",
inputs=[
generation_info,
img2img_gallery,
html_info
],
outputs=[
html_info,
html_info,
html_info,
]
)
roll.click(
fn=roll_artist,
inputs=[
img2img_prompt,
],
outputs=[
img2img_prompt,
]
)
prompts = [(txt2img_prompt, txt2img_negative_prompt), (img2img_prompt, img2img_negative_prompt)]
style_dropdowns = [(txt2img_prompt_style, txt2img_prompt_style2), (img2img_prompt_style, img2img_prompt_style2)]
for button, (prompt, negative_prompt) in zip([txt2img_save_style, img2img_save_style], prompts):
button.click(
fn=add_style,
_js="ask_for_style_name",
# Have to pass empty dummy component here, because the JavaScript and Python function have to accept
# the same number of parameters, but we only know the style-name after the JavaScript prompt
inputs=[dummy_component, prompt, negative_prompt],
outputs=[txt2img_prompt_style, img2img_prompt_style, txt2img_prompt_style2, img2img_prompt_style2],
)
for button, (prompt, negative_prompt), (style1, style2) in zip([txt2img_prompt_style_apply, img2img_prompt_style_apply], prompts, style_dropdowns):
button.click(
fn=apply_styles,
inputs=[prompt, negative_prompt, style1, style2],
outputs=[prompt, negative_prompt, style1, style2],
)
img2img_paste_fields = [
(img2img_prompt, "Prompt"),
(img2img_negative_prompt, "Negative prompt"),
(steps, "Steps"),
(sampler_index, "Sampler"),
(restore_faces, "Face restoration"),
(cfg_scale, "CFG scale"),
(seed, "Seed"),
(width, "Size-1"),
(height, "Size-2"),
(batch_size, "Batch size"),
(subseed, "Variation seed"),
(subseed_strength, "Variation seed strength"),
(seed_resize_from_w, "Seed resize from-1"),
(seed_resize_from_h, "Seed resize from-2"),
(denoising_strength, "Denoising strength"),
]
modules.generation_parameters_copypaste.connect_paste(paste, img2img_paste_fields, img2img_prompt)
with gr.Blocks(analytics_enabled=False) as extras_interface:
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
with gr.Tabs(elem_id="mode_extras"):
with gr.TabItem('Single Image'):
extras_image = gr.Image(label="Source", source="upload", interactive=True, type="pil")
with gr.TabItem('Batch Process'):
image_batch = gr.File(label="Batch Process", file_count="multiple", interactive=True, type="file")
upscaling_resize = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Resize", value=2)
with gr.Group():
extras_upscaler_1 = gr.Radio(label='Upscaler 1', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index")
with gr.Group():
extras_upscaler_2 = gr.Radio(label='Upscaler 2', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index")
extras_upscaler_2_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="Upscaler 2 visibility", value=1)
with gr.Group():
gfpgan_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="GFPGAN visibility", value=0, interactive=modules.gfpgan_model.have_gfpgan)
with gr.Group():
codeformer_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="CodeFormer visibility", value=0, interactive=modules.codeformer_model.have_codeformer)
codeformer_weight = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="CodeFormer weight (0 = maximum effect, 1 = minimum effect)", value=0, interactive=modules.codeformer_model.have_codeformer)
submit = gr.Button('Generate', elem_id="extras_generate", variant='primary')
with gr.Column(variant='panel'):
result_images = gr.Gallery(label="Result", show_label=False)
html_info_x = gr.HTML()
html_info = gr.HTML()
extras_send_to_img2img = gr.Button('Send to img2img')
extras_send_to_inpaint = gr.Button('Send to inpaint')
submit.click(
fn=run_extras,
_js="get_extras_tab_index",
inputs=[
dummy_component,
extras_image,
image_batch,
gfpgan_visibility,
codeformer_visibility,
codeformer_weight,
upscaling_resize,
extras_upscaler_1,
extras_upscaler_2,
extras_upscaler_2_visibility,
],
outputs=[
result_images,
html_info_x,
html_info,
]
)
extras_send_to_img2img.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_img2img",
inputs=[result_images],
outputs=[init_img],
)
extras_send_to_inpaint.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_img2img",
inputs=[result_images],
outputs=[init_img_with_mask],
)
with gr.Blocks(analytics_enabled=False) as pnginfo_interface:
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
image = gr.Image(elem_id="pnginfo_image", label="Source", source="upload", interactive=True, type="pil")
with gr.Column(variant='panel'):
html = gr.HTML()
generation_info = gr.Textbox(visible=False)
html2 = gr.HTML()
with gr.Row():
pnginfo_send_to_txt2img = gr.Button('Send to txt2img')
pnginfo_send_to_img2img = gr.Button('Send to img2img')
image.change(
fn=wrap_gradio_call(run_pnginfo),
inputs=[image],
outputs=[html, generation_info, html2],
)
def create_setting_component(key):
def fun():
return opts.data[key] if key in opts.data else opts.data_labels[key].default
info = opts.data_labels[key]
t = type(info.default)
args = info.component_args() if callable(info.component_args) else info.component_args
if info.component is not None:
comp = info.component
elif t == str:
comp = gr.Textbox
elif t == int:
comp = gr.Number
elif t == bool:
comp = gr.Checkbox
else:
raise Exception(f'bad options item type: {str(t)} for key {key}')
return comp(label=info.label, value=fun, **(args or {}))
components = []
def run_settings(*args):
changed = 0
for key, value, comp in zip(opts.data_labels.keys(), args, components):
if not opts.same_type(value, opts.data_labels[key].default):
return f"Bad value for setting {key}: {value}; expecting {type(opts.data_labels[key].default).__name__}"
for key, value, comp in zip(opts.data_labels.keys(), args, components):
comp_args = opts.data_labels[key].component_args
if comp_args and isinstance(comp_args, dict) and comp_args.get('visible') is False:
continue
oldval = opts.data.get(key, None)
opts.data[key] = value
if oldval != value:
if opts.data_labels[key].onchange is not None:
opts.data_labels[key].onchange()
changed += 1
opts.save(shared.config_filename)
return f'{changed} settings changed.', opts.dumpjson()
with gr.Blocks(analytics_enabled=False) as settings_interface:
settings_submit = gr.Button(value="Apply settings", variant='primary')
result = gr.HTML()
settings_cols = 3
items_per_col = int(len(opts.data_labels) * 0.9 / settings_cols)
cols_displayed = 0
items_displayed = 0
previous_section = None
column = None
with gr.Row(elem_id="settings").style(equal_height=False):
for i, (k, item) in enumerate(opts.data_labels.items()):
if previous_section != item.section:
if cols_displayed < settings_cols and (items_displayed >= items_per_col or previous_section is None):
if column is not None:
column.__exit__()
column = gr.Column(variant='panel')
column.__enter__()
items_displayed = 0
cols_displayed += 1
previous_section = item.section
gr.HTML(elem_id="settings_header_text_{}".format(item.section[0]), value='<h1 class="gr-button-lg">{}</h1>'.format(item.section[1]))
components.append(create_setting_component(k))
items_displayed += 1
request_notifications = gr.Button(value='Request browser notifications', elem_id="request_notifications")
request_notifications.click(
fn=lambda: None,
inputs=[],
outputs=[],
_js='function(){}'
)
if column is not None:
column.__exit__()
interfaces = [
(txt2img_interface, "txt2img", "txt2img"),
(img2img_interface, "img2img", "img2img"),
(extras_interface, "Extras", "extras"),
(pnginfo_interface, "PNG Info", "pnginfo"),
(settings_interface, "Settings", "settings"),
]
with open(os.path.join(script_path, "style.css"), "r", encoding="utf8") as file:
css = file.read()
if os.path.exists(os.path.join(script_path, "user.css")):
with open(os.path.join(script_path, "user.css"), "r", encoding="utf8") as file:
usercss = file.read()
css += usercss
if not cmd_opts.no_progressbar_hiding:
css += css_hide_progressbar
with gr.Blocks(css=css, analytics_enabled=False, title="Stable Diffusion") as demo:
with gr.Tabs() as tabs:
for interface, label, ifid in interfaces:
with gr.TabItem(label, id=ifid):
interface.render()
if os.path.exists(os.path.join(script_path, "notification.mp3")):
audio_notification = gr.Audio(interactive=False, value=os.path.join(script_path, "notification.mp3"), elem_id="audio_notification", visible=False)
text_settings = gr.Textbox(elem_id="settings_json", value=lambda: opts.dumpjson(), visible=False)
settings_submit.click(
fn=run_settings,
inputs=components,
outputs=[result, text_settings],
)
paste_field_names = ['Prompt', 'Negative prompt', 'Steps', 'Face restoration', 'Seed', 'Size-1', 'Size-2']
txt2img_fields = [field for field,name in txt2img_paste_fields if name in paste_field_names]
img2img_fields = [field for field,name in img2img_paste_fields if name in paste_field_names]
send_to_img2img.click(
fn=lambda img, *args: (image_from_url_text(img),*args),
_js="(gallery, ...args) => [extract_image_from_gallery_img2img(gallery), ...args]",
inputs=[txt2img_gallery] + txt2img_fields,
outputs=[init_img] + img2img_fields,
)
send_to_inpaint.click(
fn=lambda x, *args: (image_from_url_text(x), *args),
_js="(gallery, ...args) => [extract_image_from_gallery_inpaint(gallery), ...args]",
inputs=[txt2img_gallery] + txt2img_fields,
outputs=[init_img_with_mask] + img2img_fields,
)
img2img_send_to_img2img.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_img2img",
inputs=[img2img_gallery],
outputs=[init_img],
)
img2img_send_to_inpaint.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_inpaint",
inputs=[img2img_gallery],
outputs=[init_img_with_mask],
)
send_to_extras.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_extras",
inputs=[txt2img_gallery],
outputs=[extras_image],
)
img2img_send_to_extras.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_extras",
inputs=[img2img_gallery],
outputs=[extras_image],
)
modules.generation_parameters_copypaste.connect_paste(pnginfo_send_to_txt2img, txt2img_paste_fields, generation_info, 'switch_to_txt2img')
modules.generation_parameters_copypaste.connect_paste(pnginfo_send_to_img2img, img2img_paste_fields, generation_info, 'switch_to_img2img_img2img')
ui_config_file = cmd_opts.ui_config_file
ui_settings = {}
settings_count = len(ui_settings)
error_loading = False
try:
if os.path.exists(ui_config_file):
with open(ui_config_file, "r", encoding="utf8") as file:
ui_settings = json.load(file)
except Exception:
error_loading = True
print("Error loading settings:", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
def loadsave(path, x):
def apply_field(obj, field, condition=None):
key = path + "/" + field
if getattr(obj,'custom_script_source',None) is not None:
key = 'customscript/' + obj.custom_script_source + '/' + key
if getattr(obj, 'do_not_save_to_config', False):
return
saved_value = ui_settings.get(key, None)
if saved_value is None:
ui_settings[key] = getattr(obj, field)
elif condition is None or condition(saved_value):
setattr(obj, field, saved_value)
if type(x) in [gr.Slider, gr.Radio, gr.Checkbox, gr.Textbox, gr.Number] and x.visible:
apply_field(x, 'visible')
if type(x) == gr.Slider:
apply_field(x, 'value')
apply_field(x, 'minimum')
apply_field(x, 'maximum')
apply_field(x, 'step')
if type(x) == gr.Radio:
apply_field(x, 'value', lambda val: val in x.choices)
if type(x) == gr.Checkbox:
apply_field(x, 'value')
if type(x) == gr.Textbox:
apply_field(x, 'value')
if type(x) == gr.Number:
apply_field(x, 'value')
visit(txt2img_interface, loadsave, "txt2img")
visit(img2img_interface, loadsave, "img2img")
visit(extras_interface, loadsave, "extras")
if not error_loading and (not os.path.exists(ui_config_file) or settings_count != len(ui_settings)):
with open(ui_config_file, "w", encoding="utf8") as file:
json.dump(ui_settings, file, indent=4)
return demo
with open(os.path.join(script_path, "script.js"), "r", encoding="utf8") as jsfile:
javascript = f'<script>{jsfile.read()}</script>'
jsdir = os.path.join(script_path, "javascript")
for filename in sorted(os.listdir(jsdir)):
with open(os.path.join(jsdir, filename), "r", encoding="utf8") as jsfile:
javascript += f"\n<script>{jsfile.read()}</script>"
def template_response(*args, **kwargs):
res = gradio_routes_templates_response(*args, **kwargs)
res.body = res.body.replace(b'</head>', f'{javascript}</head>'.encode("utf8"))
res.init_headers()
return res
gradio_routes_templates_response = gradio.routes.templates.TemplateResponse
gradio.routes.templates.TemplateResponse = template_response
<|code_end|>
|
Checkpoint Merger - FileNotFoundError: [Errno 2] No such file or directory:
When using a custom model folder (COMMANDLINE_ARGS=--ckpt-dir "d:\external\models\"), the checkpoint merger can't find the models
FileNotFoundError: [Errno 2] No such file or directory: 'models/wd-v1-2-full-ema.ckpt'
**To Reproduce**
Steps to reproduce the behavior:
1. set COMMANDLINE_ARGS=--ckpt-dir "d:\external\models"
2. open the Checkpoint Merger tab
3. select models it found in the custom folder
4. click run
**Expected behavior**
Since the UI already knows where the models are, the checkpoint merger should continue to use that folder.
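The report points at a path-resolution gap: the merger builds a relative `models/<name>.ckpt` path instead of honoring `--ckpt-dir`. Below is a minimal sketch of the kind of lookup that would avoid the error; the helper name `resolve_ckpt_path` and its arguments are assumptions for illustration, not the webui's actual API:

```python
import os

def resolve_ckpt_path(filename, ckpt_dir=None, default_dir="models"):
    """Hypothetical helper: try the custom --ckpt-dir first (when given),
    then fall back to the default models folder."""
    candidates = []
    if ckpt_dir:
        candidates.append(os.path.join(ckpt_dir, filename))
    candidates.append(os.path.join(default_dir, filename))
    for path in candidates:
        if os.path.isfile(path):
            return path
    # surface a clearer error that lists every location that was tried
    raise FileNotFoundError(f"{filename} not found in: {', '.join(candidates)}")

# usage with assumed values: the merger would open the resolved path
# instead of hard-coding 'models/wd-v1-2-full-ema.ckpt'
# path = resolve_ckpt_path("wd-v1-2-full-ema.ckpt", ckpt_dir=r"d:\external\models")
```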
|
modules/ui.py
<|code_start|>import base64
import html
import io
import json
import math
import mimetypes
import os
import random
import sys
import time
import traceback
import numpy as np
import torch
from PIL import Image, PngImagePlugin
import gradio as gr
import gradio.utils
import gradio.routes
from modules.paths import script_path
from modules.shared import opts, cmd_opts
import modules.shared as shared
from modules.sd_samplers import samplers, samplers_for_img2img
from modules.sd_hijack import model_hijack
import modules.ldsr_model
import modules.scripts
import modules.gfpgan_model
import modules.codeformer_model
import modules.styles
import modules.generation_parameters_copypaste
# this is a fix for Windows users. Without it, javascript files will be served with text/html content-type and the browser will not show any UI
mimetypes.init()
mimetypes.add_type('application/javascript', '.js')
if not cmd_opts.share and not cmd_opts.listen:
# fix gradio phoning home
gradio.utils.version_check = lambda: None
gradio.utils.get_local_ip_address = lambda: '127.0.0.1'
def gr_show(visible=True):
return {"visible": visible, "__type__": "update"}
sample_img2img = "assets/stable-samples/img2img/sketch-mountains-input.jpg"
sample_img2img = sample_img2img if os.path.exists(sample_img2img) else None
css_hide_progressbar = """
.wrap .m-12 svg { display:none!important; }
.wrap .m-12::before { content:"Loading..." }
.progress-bar { display:none!important; }
.meta-text { display:none!important; }
"""
# Using constants for these since the variation selector isn't visible.
# Important that they exactly match script.js for tooltip to work.
random_symbol = '\U0001f3b2\ufe0f' # 🎲️
reuse_symbol = '\u267b\ufe0f' # ♻️
art_symbol = '\U0001f3a8' # 🎨
paste_symbol = '\u2199\ufe0f' # ↙
def plaintext_to_html(text):
text = "<p>" + "<br>\n".join([f"{html.escape(x)}" for x in text.split('\n')]) + "</p>"
return text
def image_from_url_text(filedata):
if type(filedata) == list:
if len(filedata) == 0:
return None
filedata = filedata[0]
if filedata.startswith("data:image/png;base64,"):
filedata = filedata[len("data:image/png;base64,"):]
filedata = base64.decodebytes(filedata.encode('utf-8'))
image = Image.open(io.BytesIO(filedata))
return image
def send_gradio_gallery_to_image(x):
if len(x) == 0:
return None
return image_from_url_text(x[0])
def save_files(js_data, images, index):
import csv
os.makedirs(opts.outdir_save, exist_ok=True)
filenames = []
data = json.loads(js_data)
if index > -1 and opts.save_selected_only and (index >= data["index_of_first_image"]): # ensures we are looking at a specific non-grid picture, and we have save_selected_only
images = [images[index]]
infotexts = [data["infotexts"][index]]
else:
infotexts = data["infotexts"]
with open(os.path.join(opts.outdir_save, "log.csv"), "a", encoding="utf8", newline='') as file:
at_start = file.tell() == 0
writer = csv.writer(file)
if at_start:
writer.writerow(["prompt", "seed", "width", "height", "sampler", "cfgs", "steps", "filename", "negative_prompt"])
filename_base = str(int(time.time() * 1000))
for i, filedata in enumerate(images):
filename = filename_base + ("" if len(images) == 1 else "-" + str(i + 1)) + ".png"
filepath = os.path.join(opts.outdir_save, filename)
if filedata.startswith("data:image/png;base64,"):
filedata = filedata[len("data:image/png;base64,"):]
pnginfo = PngImagePlugin.PngInfo()
pnginfo.add_text('parameters', infotexts[i])
image = Image.open(io.BytesIO(base64.decodebytes(filedata.encode('utf-8'))))
image.save(filepath, quality=opts.jpeg_quality, pnginfo=pnginfo)
filenames.append(filename)
writer.writerow([data["prompt"], data["seed"], data["width"], data["height"], data["sampler"], data["cfg_scale"], data["steps"], filenames[0], data["negative_prompt"]])
return '', '', plaintext_to_html(f"Saved: {filenames[0]}")
def wrap_gradio_call(func):
def f(*args, **kwargs):
run_memmon = opts.memmon_poll_rate > 0 and not shared.mem_mon.disabled
if run_memmon:
shared.mem_mon.monitor()
t = time.perf_counter()
try:
res = list(func(*args, **kwargs))
except Exception as e:
print("Error completing request", file=sys.stderr)
print("Arguments:", args, kwargs, file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
shared.state.job = ""
shared.state.job_count = 0
res = [None, '', f"<div class='error'>{plaintext_to_html(type(e).__name__+': '+str(e))}</div>"]
elapsed = time.perf_counter() - t
if run_memmon:
mem_stats = {k: -(v//-(1024*1024)) for k, v in shared.mem_mon.stop().items()}
active_peak = mem_stats['active_peak']
reserved_peak = mem_stats['reserved_peak']
sys_peak = mem_stats['system_peak']
sys_total = mem_stats['total']
sys_pct = round(sys_peak/max(sys_total, 1) * 100, 2)
vram_html = f"<p class='vram'>Torch active/reserved: {active_peak}/{reserved_peak} MiB, <wbr>Sys VRAM: {sys_peak}/{sys_total} MiB ({sys_pct}%)</p>"
else:
vram_html = ''
# last item is always HTML
res[-1] += f"<div class='performance'><p class='time'>Time taken: <wbr>{elapsed:.2f}s</p>{vram_html}</div>"
shared.state.interrupted = False
return tuple(res)
return f
def check_progress_call(id_part):
if shared.state.job_count == 0:
return "", gr_show(False), gr_show(False)
progress = 0
if shared.state.job_count > 0:
progress += shared.state.job_no / shared.state.job_count
if shared.state.sampling_steps > 0:
progress += 1 / shared.state.job_count * shared.state.sampling_step / shared.state.sampling_steps
progress = min(progress, 1)
progressbar = ""
if opts.show_progressbar:
progressbar = f"""<div class='progressDiv'><div class='progress' style="width:{progress * 100}%">{str(int(progress*100))+"%" if progress > 0.01 else ""}</div></div>"""
image = gr_show(False)
preview_visibility = gr_show(False)
if opts.show_progress_every_n_steps > 0:
if shared.parallel_processing_allowed:
if shared.state.sampling_step - shared.state.current_image_sampling_step >= opts.show_progress_every_n_steps and shared.state.current_latent is not None:
shared.state.current_image = modules.sd_samplers.sample_to_image(shared.state.current_latent)
shared.state.current_image_sampling_step = shared.state.sampling_step
image = shared.state.current_image
if image is None:
image = gr.update(value=None)
else:
preview_visibility = gr_show(True)
return f"<span id='{id_part}_progress_span' style='display: none'>{time.time()}</span><p>{progressbar}</p>", preview_visibility, image
def check_progress_call_initial(id_part):
shared.state.job_count = -1
shared.state.current_latent = None
shared.state.current_image = None
return check_progress_call(id_part)
def roll_artist(prompt):
allowed_cats = set([x for x in shared.artist_db.categories() if len(opts.random_artist_categories)==0 or x in opts.random_artist_categories])
artist = random.choice([x for x in shared.artist_db.artists if x.category in allowed_cats])
return prompt + ", " + artist.name if prompt != '' else artist.name
def visit(x, func, path=""):
if hasattr(x, 'children'):
for c in x.children:
visit(c, func, path)
elif x.label is not None:
func(path + "/" + str(x.label), x)
def add_style(name: str, prompt: str, negative_prompt: str):
if name is None:
return [gr_show(), gr_show()]
style = modules.styles.PromptStyle(name, prompt, negative_prompt)
shared.prompt_styles.styles[style.name] = style
# Save all loaded prompt styles: this allows us to update the storage format in the future more easily, because we
# reserialize all styles every time we save them
shared.prompt_styles.save_styles(shared.styles_filename)
return [gr.Dropdown.update(visible=True, choices=list(shared.prompt_styles.styles)) for _ in range(4)]
def apply_styles(prompt, prompt_neg, style1_name, style2_name):
prompt = shared.prompt_styles.apply_styles_to_prompt(prompt, [style1_name, style2_name])
prompt_neg = shared.prompt_styles.apply_negative_styles_to_prompt(prompt_neg, [style1_name, style2_name])
return [gr.Textbox.update(value=prompt), gr.Textbox.update(value=prompt_neg), gr.Dropdown.update(value="None"), gr.Dropdown.update(value="None")]
def interrogate(image):
prompt = shared.interrogator.interrogate(image)
return gr_show(True) if prompt is None else prompt
def create_seed_inputs():
with gr.Row():
with gr.Box():
with gr.Row(elem_id='seed_row'):
seed = (gr.Textbox if cmd_opts.use_textbox_seed else gr.Number)(label='Seed', value=-1)
seed.style(container=False)
random_seed = gr.Button(random_symbol, elem_id='random_seed')
reuse_seed = gr.Button(reuse_symbol, elem_id='reuse_seed')
with gr.Box(elem_id='subseed_show_box'):
seed_checkbox = gr.Checkbox(label='Extra', elem_id='subseed_show', value=False)
# Components to show/hide based on the 'Extra' checkbox
seed_extras = []
with gr.Row(visible=False) as seed_extra_row_1:
seed_extras.append(seed_extra_row_1)
with gr.Box():
with gr.Row(elem_id='subseed_row'):
subseed = gr.Number(label='Variation seed', value=-1)
subseed.style(container=False)
random_subseed = gr.Button(random_symbol, elem_id='random_subseed')
reuse_subseed = gr.Button(reuse_symbol, elem_id='reuse_subseed')
subseed_strength = gr.Slider(label='Variation strength', value=0.0, minimum=0, maximum=1, step=0.01)
with gr.Row(visible=False) as seed_extra_row_2:
seed_extras.append(seed_extra_row_2)
seed_resize_from_w = gr.Slider(minimum=0, maximum=2048, step=64, label="Resize seed from width", value=0)
seed_resize_from_h = gr.Slider(minimum=0, maximum=2048, step=64, label="Resize seed from height", value=0)
random_seed.click(fn=lambda: -1, show_progress=False, inputs=[], outputs=[seed])
random_subseed.click(fn=lambda: -1, show_progress=False, inputs=[], outputs=[subseed])
def change_visibility(show):
return {comp: gr_show(show) for comp in seed_extras}
seed_checkbox.change(change_visibility, show_progress=False, inputs=[seed_checkbox], outputs=seed_extras)
return seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox
def connect_reuse_seed(seed: gr.Number, reuse_seed: gr.Button, generation_info: gr.Textbox, dummy_component, is_subseed):
""" Connects a 'reuse (sub)seed' button's click event so that it copies last used
(sub)seed value from generation info to the seed field. If copying subseed and subseed strength
was 0, i.e. no variation seed was used, it copies the normal seed value instead."""
def copy_seed(gen_info_string: str, index):
res = -1
try:
gen_info = json.loads(gen_info_string)
index -= gen_info.get('index_of_first_image', 0)
if is_subseed and gen_info.get('subseed_strength', 0) > 0:
all_subseeds = gen_info.get('all_subseeds', [-1])
res = all_subseeds[index if 0 <= index < len(all_subseeds) else 0]
else:
all_seeds = gen_info.get('all_seeds', [-1])
res = all_seeds[index if 0 <= index < len(all_seeds) else 0]
except json.decoder.JSONDecodeError as e:
if gen_info_string != '':
print("Error parsing JSON generation info:", file=sys.stderr)
print(gen_info_string, file=sys.stderr)
return [res, gr_show(False)]
reuse_seed.click(
fn=copy_seed,
_js="(x, y) => [x, selected_gallery_index()]",
show_progress=False,
inputs=[generation_info, dummy_component],
outputs=[seed, dummy_component]
)
def update_token_counter(text):
tokens, token_count, max_length = model_hijack.tokenize(text)
style_class = ' class="red"' if (token_count > max_length) else ""
return f"<span {style_class}>{token_count}/{max_length}</span>"
def create_toprow(is_img2img):
id_part = "img2img" if is_img2img else "txt2img"
with gr.Row(elem_id="toprow"):
with gr.Column(scale=4):
with gr.Row():
with gr.Column(scale=80):
with gr.Row():
prompt = gr.Textbox(label="Prompt", elem_id=f"{id_part}_prompt", show_label=False, placeholder="Prompt", lines=2)
with gr.Column(scale=1, elem_id="roll_col"):
roll = gr.Button(value=art_symbol, elem_id="roll", visible=len(shared.artist_db.artists) > 0)
paste = gr.Button(value=paste_symbol, elem_id="paste")
token_counter = gr.HTML(value="<span></span>", elem_id=f"{id_part}_token_counter")
hidden_button = gr.Button(visible=False, elem_id=f"{id_part}_token_button")
hidden_button.click(fn=update_token_counter, inputs=[prompt], outputs=[token_counter])
with gr.Column(scale=10, elem_id="style_pos_col"):
prompt_style = gr.Dropdown(label="Style 1", elem_id=f"{id_part}_style_index", choices=[k for k, v in shared.prompt_styles.styles.items()], value=next(iter(shared.prompt_styles.styles.keys())), visible=len(shared.prompt_styles.styles) > 1)
with gr.Row():
with gr.Column(scale=8):
negative_prompt = gr.Textbox(label="Negative prompt", elem_id="negative_prompt", show_label=False, placeholder="Negative prompt", lines=2)
with gr.Column(scale=1, elem_id="style_neg_col"):
prompt_style2 = gr.Dropdown(label="Style 2", elem_id=f"{id_part}_style2_index", choices=[k for k, v in shared.prompt_styles.styles.items()], value=next(iter(shared.prompt_styles.styles.keys())), visible=len(shared.prompt_styles.styles) > 1)
with gr.Column(scale=1):
with gr.Row():
interrupt = gr.Button('Interrupt', elem_id=f"{id_part}_interrupt")
submit = gr.Button('Generate', elem_id="generate", variant='primary')
interrupt.click(
fn=lambda: shared.state.interrupt(),
inputs=[],
outputs=[],
)
with gr.Row():
if is_img2img:
interrogate = gr.Button('Interrogate', elem_id="interrogate")
else:
interrogate = None
prompt_style_apply = gr.Button('Apply style', elem_id="style_apply")
save_style = gr.Button('Create style', elem_id="style_create")
return prompt, roll, prompt_style, negative_prompt, prompt_style2, submit, interrogate, prompt_style_apply, save_style, paste
def setup_progressbar(progressbar, preview, id_part):
check_progress = gr.Button('Check progress', elem_id=f"{id_part}_check_progress", visible=False)
check_progress.click(
fn=lambda: check_progress_call(id_part),
show_progress=False,
inputs=[],
outputs=[progressbar, preview, preview],
)
check_progress_initial = gr.Button('Check progress (first)', elem_id=f"{id_part}_check_progress_initial", visible=False)
check_progress_initial.click(
fn=lambda: check_progress_call_initial(id_part),
show_progress=False,
inputs=[],
outputs=[progressbar, preview, preview],
)
def create_ui(txt2img, img2img, run_extras, run_pnginfo, run_modelmerger):
with gr.Blocks(analytics_enabled=False) as txt2img_interface:
txt2img_prompt, roll, txt2img_prompt_style, txt2img_negative_prompt, txt2img_prompt_style2, submit, _, txt2img_prompt_style_apply, txt2img_save_style, paste = create_toprow(is_img2img=False)
dummy_component = gr.Label(visible=False)
with gr.Row(elem_id='txt2img_progress_row'):
with gr.Column(scale=1):
pass
with gr.Column(scale=1):
progressbar = gr.HTML(elem_id="txt2img_progressbar")
txt2img_preview = gr.Image(elem_id='txt2img_preview', visible=False)
setup_progressbar(progressbar, txt2img_preview, 'txt2img')
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
steps = gr.Slider(minimum=1, maximum=150, step=1, label="Sampling Steps", value=20)
sampler_index = gr.Radio(label='Sampling method', elem_id="txt2img_sampling", choices=[x.name for x in samplers], value=samplers[0].name, type="index")
with gr.Group():
width = gr.Slider(minimum=64, maximum=2048, step=64, label="Width", value=512)
height = gr.Slider(minimum=64, maximum=2048, step=64, label="Height", value=512)
with gr.Row():
restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1)
tiling = gr.Checkbox(label='Tiling', value=False)
enable_hr = gr.Checkbox(label='Highres. fix', value=False)
with gr.Row(visible=False) as hr_options:
scale_latent = gr.Checkbox(label='Scale latent', value=False)
denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7)
with gr.Row():
batch_count = gr.Slider(minimum=1, maximum=cmd_opts.max_batch_count, step=1, label='Batch count', value=1)
batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1)
cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0)
seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs()
with gr.Group():
custom_inputs = modules.scripts.scripts_txt2img.setup_ui(is_img2img=False)
with gr.Column(variant='panel'):
with gr.Group():
txt2img_preview = gr.Image(elem_id='txt2img_preview', visible=False)
txt2img_gallery = gr.Gallery(label='Output', show_label=False, elem_id='txt2img_gallery').style(grid=4)
with gr.Group():
with gr.Row():
save = gr.Button('Save')
send_to_img2img = gr.Button('Send to img2img')
send_to_inpaint = gr.Button('Send to inpaint')
send_to_extras = gr.Button('Send to extras')
with gr.Group():
html_info = gr.HTML()
generation_info = gr.Textbox(visible=False)
connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False)
connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True)
txt2img_args = dict(
fn=txt2img,
_js="submit",
inputs=[
txt2img_prompt,
txt2img_negative_prompt,
txt2img_prompt_style,
txt2img_prompt_style2,
steps,
sampler_index,
restore_faces,
tiling,
batch_count,
batch_size,
cfg_scale,
seed,
subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox,
height,
width,
enable_hr,
scale_latent,
denoising_strength,
] + custom_inputs,
outputs=[
txt2img_gallery,
generation_info,
html_info
],
show_progress=False,
)
txt2img_prompt.submit(**txt2img_args)
submit.click(**txt2img_args)
enable_hr.change(
fn=lambda x: gr_show(x),
inputs=[enable_hr],
outputs=[hr_options],
)
save.click(
fn=wrap_gradio_call(save_files),
_js="(x, y, z) => [x, y, selected_gallery_index()]",
inputs=[
generation_info,
txt2img_gallery,
html_info,
],
outputs=[
html_info,
html_info,
html_info,
]
)
roll.click(
fn=roll_artist,
inputs=[
txt2img_prompt,
],
outputs=[
txt2img_prompt,
]
)
txt2img_paste_fields = [
(txt2img_prompt, "Prompt"),
(txt2img_negative_prompt, "Negative prompt"),
(steps, "Steps"),
(sampler_index, "Sampler"),
(restore_faces, "Face restoration"),
(cfg_scale, "CFG scale"),
(seed, "Seed"),
(width, "Size-1"),
(height, "Size-2"),
(batch_size, "Batch size"),
(subseed, "Variation seed"),
(subseed_strength, "Variation seed strength"),
(seed_resize_from_w, "Seed resize from-1"),
(seed_resize_from_h, "Seed resize from-2"),
(denoising_strength, "Denoising strength"),
(enable_hr, lambda d: "Denoising strength" in d),
(hr_options, lambda d: gr.Row.update(visible="Denoising strength" in d)),
]
modules.generation_parameters_copypaste.connect_paste(paste, txt2img_paste_fields, txt2img_prompt)
with gr.Blocks(analytics_enabled=False) as img2img_interface:
img2img_prompt, roll, img2img_prompt_style, img2img_negative_prompt, img2img_prompt_style2, submit, img2img_interrogate, img2img_prompt_style_apply, img2img_save_style, paste = create_toprow(is_img2img=True)
with gr.Row(elem_id='img2img_progress_row'):
with gr.Column(scale=1):
pass
with gr.Column(scale=1):
progressbar = gr.HTML(elem_id="img2img_progressbar")
img2img_preview = gr.Image(elem_id='img2img_preview', visible=False)
setup_progressbar(progressbar, img2img_preview, 'img2img')
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
with gr.Tabs(elem_id="mode_img2img") as tabs_img2img_mode:
with gr.TabItem('img2img', id='img2img'):
init_img = gr.Image(label="Image for img2img", show_label=False, source="upload", interactive=True, type="pil")
with gr.TabItem('Inpaint', id='inpaint'):
init_img_with_mask = gr.Image(label="Image for inpainting with mask", show_label=False, elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool="sketch", image_mode="RGBA")
init_img_inpaint = gr.Image(label="Image for img2img", show_label=False, source="upload", interactive=True, type="pil", visible=False, elem_id="img_inpaint_base")
init_mask_inpaint = gr.Image(label="Mask", source="upload", interactive=True, type="pil", visible=False, elem_id="img_inpaint_mask")
mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4)
with gr.Row():
mask_mode = gr.Radio(label="Mask mode", show_label=False, choices=["Draw mask", "Upload mask"], type="index", value="Draw mask", elem_id="mask_mode")
inpainting_mask_invert = gr.Radio(label='Masking mode', show_label=False, choices=['Inpaint masked', 'Inpaint not masked'], value='Inpaint masked', type="index")
inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='fill', type="index")
with gr.Row():
inpaint_full_res = gr.Checkbox(label='Inpaint at full resolution', value=False)
inpaint_full_res_padding = gr.Slider(label='Inpaint at full resolution padding, pixels', minimum=0, maximum=256, step=4, value=32)
with gr.TabItem('Batch img2img', id='batch'):
hidden = '<br>Disabled when launched with --hide-ui-dir-config.' if shared.cmd_opts.hide_ui_dir_config else ''
gr.HTML(f"<p class=\"text-gray-500\">Process images in a directory on the same machine where the server is running.{hidden}</p>")
img2img_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs)
img2img_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs)
with gr.Row():
resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", show_label=False, choices=["Just resize", "Crop and resize", "Resize and fill"], type="index", value="Just resize")
steps = gr.Slider(minimum=1, maximum=150, step=1, label="Sampling Steps", value=20)
sampler_index = gr.Radio(label='Sampling method', choices=[x.name for x in samplers_for_img2img], value=samplers_for_img2img[0].name, type="index")
with gr.Group():
width = gr.Slider(minimum=64, maximum=2048, step=64, label="Width", value=512)
height = gr.Slider(minimum=64, maximum=2048, step=64, label="Height", value=512)
with gr.Row():
restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1)
tiling = gr.Checkbox(label='Tiling', value=False)
with gr.Row():
batch_count = gr.Slider(minimum=1, maximum=cmd_opts.max_batch_count, step=1, label='Batch count', value=1)
batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1)
with gr.Group():
cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0)
denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75)
seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs()
with gr.Group():
custom_inputs = modules.scripts.scripts_img2img.setup_ui(is_img2img=True)
with gr.Column(variant='panel'):
with gr.Group():
img2img_preview = gr.Image(elem_id='img2img_preview', visible=False)
img2img_gallery = gr.Gallery(label='Output', show_label=False, elem_id='img2img_gallery').style(grid=4)
with gr.Group():
with gr.Row():
save = gr.Button('Save')
img2img_send_to_img2img = gr.Button('Send to img2img')
img2img_send_to_inpaint = gr.Button('Send to inpaint')
img2img_send_to_extras = gr.Button('Send to extras')
with gr.Group():
html_info = gr.HTML()
generation_info = gr.Textbox(visible=False)
connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False)
connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True)
mask_mode.change(
lambda mode, img: {
init_img_with_mask: gr_show(mode == 0),
init_img_inpaint: gr_show(mode == 1),
init_mask_inpaint: gr_show(mode == 1),
},
inputs=[mask_mode, init_img_with_mask],
outputs=[
init_img_with_mask,
init_img_inpaint,
init_mask_inpaint,
],
)
img2img_args = dict(
fn=img2img,
_js="submit_img2img",
inputs=[
dummy_component,
img2img_prompt,
img2img_negative_prompt,
img2img_prompt_style,
img2img_prompt_style2,
init_img,
init_img_with_mask,
init_img_inpaint,
init_mask_inpaint,
mask_mode,
steps,
sampler_index,
mask_blur,
inpainting_fill,
restore_faces,
tiling,
batch_count,
batch_size,
cfg_scale,
denoising_strength,
seed,
subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox,
height,
width,
resize_mode,
inpaint_full_res,
inpaint_full_res_padding,
inpainting_mask_invert,
img2img_batch_input_dir,
img2img_batch_output_dir,
] + custom_inputs,
outputs=[
img2img_gallery,
generation_info,
html_info
],
show_progress=False,
)
img2img_prompt.submit(**img2img_args)
submit.click(**img2img_args)
img2img_interrogate.click(
fn=interrogate,
inputs=[init_img],
outputs=[img2img_prompt],
)
save.click(
fn=wrap_gradio_call(save_files),
_js="(x, y, z) => [x, y, selected_gallery_index()]",
inputs=[
generation_info,
img2img_gallery,
html_info
],
outputs=[
html_info,
html_info,
html_info,
]
)
roll.click(
fn=roll_artist,
inputs=[
img2img_prompt,
],
outputs=[
img2img_prompt,
]
)
prompts = [(txt2img_prompt, txt2img_negative_prompt), (img2img_prompt, img2img_negative_prompt)]
style_dropdowns = [(txt2img_prompt_style, txt2img_prompt_style2), (img2img_prompt_style, img2img_prompt_style2)]
for button, (prompt, negative_prompt) in zip([txt2img_save_style, img2img_save_style], prompts):
button.click(
fn=add_style,
_js="ask_for_style_name",
# Have to pass an empty dummy component here, because the JavaScript and Python functions have to accept
# the same number of parameters, but we only know the style name after the JavaScript prompt
inputs=[dummy_component, prompt, negative_prompt],
outputs=[txt2img_prompt_style, img2img_prompt_style, txt2img_prompt_style2, img2img_prompt_style2],
)
for button, (prompt, negative_prompt), (style1, style2) in zip([txt2img_prompt_style_apply, img2img_prompt_style_apply], prompts, style_dropdowns):
button.click(
fn=apply_styles,
inputs=[prompt, negative_prompt, style1, style2],
outputs=[prompt, negative_prompt, style1, style2],
)
img2img_paste_fields = [
(img2img_prompt, "Prompt"),
(img2img_negative_prompt, "Negative prompt"),
(steps, "Steps"),
(sampler_index, "Sampler"),
(restore_faces, "Face restoration"),
(cfg_scale, "CFG scale"),
(seed, "Seed"),
(width, "Size-1"),
(height, "Size-2"),
(batch_size, "Batch size"),
(subseed, "Variation seed"),
(subseed_strength, "Variation seed strength"),
(seed_resize_from_w, "Seed resize from-1"),
(seed_resize_from_h, "Seed resize from-2"),
(denoising_strength, "Denoising strength"),
]
modules.generation_parameters_copypaste.connect_paste(paste, img2img_paste_fields, img2img_prompt)
with gr.Blocks(analytics_enabled=False) as extras_interface:
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
with gr.Tabs(elem_id="mode_extras"):
with gr.TabItem('Single Image'):
extras_image = gr.Image(label="Source", source="upload", interactive=True, type="pil")
with gr.TabItem('Batch Process'):
image_batch = gr.File(label="Batch Process", file_count="multiple", interactive=True, type="file")
upscaling_resize = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Resize", value=2)
with gr.Group():
extras_upscaler_1 = gr.Radio(label='Upscaler 1', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index")
with gr.Group():
extras_upscaler_2 = gr.Radio(label='Upscaler 2', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index")
extras_upscaler_2_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="Upscaler 2 visibility", value=1)
with gr.Group():
gfpgan_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="GFPGAN visibility", value=0, interactive=modules.gfpgan_model.have_gfpgan)
with gr.Group():
codeformer_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="CodeFormer visibility", value=0, interactive=modules.codeformer_model.have_codeformer)
codeformer_weight = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="CodeFormer weight (0 = maximum effect, 1 = minimum effect)", value=0, interactive=modules.codeformer_model.have_codeformer)
submit = gr.Button('Generate', elem_id="extras_generate", variant='primary')
with gr.Column(variant='panel'):
result_images = gr.Gallery(label="Result", show_label=False)
html_info_x = gr.HTML()
html_info = gr.HTML()
extras_send_to_img2img = gr.Button('Send to img2img')
extras_send_to_inpaint = gr.Button('Send to inpaint')
submit.click(
fn=run_extras,
_js="get_extras_tab_index",
inputs=[
dummy_component,
extras_image,
image_batch,
gfpgan_visibility,
codeformer_visibility,
codeformer_weight,
upscaling_resize,
extras_upscaler_1,
extras_upscaler_2,
extras_upscaler_2_visibility,
],
outputs=[
result_images,
html_info_x,
html_info,
]
)
extras_send_to_img2img.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_img2img",
inputs=[result_images],
outputs=[init_img],
)
extras_send_to_inpaint.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_img2img",
inputs=[result_images],
outputs=[init_img_with_mask],
)
with gr.Blocks(analytics_enabled=False) as pnginfo_interface:
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
image = gr.Image(elem_id="pnginfo_image", label="Source", source="upload", interactive=True, type="pil")
with gr.Column(variant='panel'):
html = gr.HTML()
generation_info = gr.Textbox(visible=False)
html2 = gr.HTML()
with gr.Row():
pnginfo_send_to_txt2img = gr.Button('Send to txt2img')
pnginfo_send_to_img2img = gr.Button('Send to img2img')
image.change(
fn=wrap_gradio_call(run_pnginfo),
inputs=[image],
outputs=[html, generation_info, html2],
)
with gr.Blocks() as modelmerger_interface:
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
gr.HTML(value="<p>A merger of the two checkpoints will be generated in your <b>/models</b> directory.</p>")
with gr.Row():
ckpt_name_list = sorted([x.title for x in modules.sd_models.checkpoints_list.values()])
primary_model_name = gr.Dropdown(ckpt_name_list, elem_id="modelmerger_primary_model_name", label="Primary Model Name")
secondary_model_name = gr.Dropdown(ckpt_name_list, elem_id="modelmerger_secondary_model_name", label="Secondary Model Name")
interp_amount = gr.Slider(minimum=0.0, maximum=1.0, step=0.05, label='Interpolation Amount', value=0.3)
interp_method = gr.Radio(choices=["Weighted Sum", "Sigmoid", "Inverse Sigmoid"], value="Weighted Sum", label="Interpolation Method")
submit = gr.Button(elem_id="modelmerger_merge", label="Merge", variant='primary')
with gr.Column(variant='panel'):
submit_result = gr.Textbox(elem_id="modelmerger_result", show_label=False)
submit.click(
fn=run_modelmerger,
inputs=[
primary_model_name,
secondary_model_name,
interp_method,
interp_amount
],
outputs=[
submit_result,
]
)
def create_setting_component(key):
def fun():
return opts.data[key] if key in opts.data else opts.data_labels[key].default
info = opts.data_labels[key]
t = type(info.default)
args = info.component_args() if callable(info.component_args) else info.component_args
if info.component is not None:
comp = info.component
elif t == str:
comp = gr.Textbox
elif t == int:
comp = gr.Number
elif t == bool:
comp = gr.Checkbox
else:
raise Exception(f'bad options item type: {str(t)} for key {key}')
return comp(label=info.label, value=fun, **(args or {}))
components = []
def run_settings(*args):
changed = 0
for key, value, comp in zip(opts.data_labels.keys(), args, components):
if not opts.same_type(value, opts.data_labels[key].default):
return f"Bad value for setting {key}: {value}; expecting {type(opts.data_labels[key].default).__name__}"
for key, value, comp in zip(opts.data_labels.keys(), args, components):
comp_args = opts.data_labels[key].component_args
if comp_args and isinstance(comp_args, dict) and comp_args.get('visible') is False:
continue
oldval = opts.data.get(key, None)
opts.data[key] = value
if oldval != value:
if opts.data_labels[key].onchange is not None:
opts.data_labels[key].onchange()
changed += 1
opts.save(shared.config_filename)
return f'{changed} settings changed.', opts.dumpjson()
with gr.Blocks(analytics_enabled=False) as settings_interface:
settings_submit = gr.Button(value="Apply settings", variant='primary')
result = gr.HTML()
settings_cols = 3
items_per_col = int(len(opts.data_labels) * 0.9 / settings_cols)
cols_displayed = 0
items_displayed = 0
previous_section = None
column = None
with gr.Row(elem_id="settings").style(equal_height=False):
for i, (k, item) in enumerate(opts.data_labels.items()):
if previous_section != item.section:
if cols_displayed < settings_cols and (items_displayed >= items_per_col or previous_section is None):
if column is not None:
column.__exit__()
column = gr.Column(variant='panel')
column.__enter__()
items_displayed = 0
cols_displayed += 1
previous_section = item.section
gr.HTML(elem_id="settings_header_text_{}".format(item.section[0]), value='<h1 class="gr-button-lg">{}</h1>'.format(item.section[1]))
components.append(create_setting_component(k))
items_displayed += 1
request_notifications = gr.Button(value='Request browser notifications', elem_id="request_notifications")
request_notifications.click(
fn=lambda: None,
inputs=[],
outputs=[],
_js='function(){}'
)
if column is not None:
column.__exit__()
interfaces = [
(txt2img_interface, "txt2img", "txt2img"),
(img2img_interface, "img2img", "img2img"),
(extras_interface, "Extras", "extras"),
(pnginfo_interface, "PNG Info", "pnginfo"),
(modelmerger_interface, "Checkpoint Merger", "modelmerger"),
(settings_interface, "Settings", "settings"),
]
with open(os.path.join(script_path, "style.css"), "r", encoding="utf8") as file:
css = file.read()
if os.path.exists(os.path.join(script_path, "user.css")):
with open(os.path.join(script_path, "user.css"), "r", encoding="utf8") as file:
usercss = file.read()
css += usercss
if not cmd_opts.no_progressbar_hiding:
css += css_hide_progressbar
with gr.Blocks(css=css, analytics_enabled=False, title="Stable Diffusion") as demo:
with gr.Tabs() as tabs:
for interface, label, ifid in interfaces:
with gr.TabItem(label, id=ifid):
interface.render()
if os.path.exists(os.path.join(script_path, "notification.mp3")):
audio_notification = gr.Audio(interactive=False, value=os.path.join(script_path, "notification.mp3"), elem_id="audio_notification", visible=False)
text_settings = gr.Textbox(elem_id="settings_json", value=lambda: opts.dumpjson(), visible=False)
settings_submit.click(
fn=run_settings,
inputs=components,
outputs=[result, text_settings],
)
paste_field_names = ['Prompt', 'Negative prompt', 'Steps', 'Face restoration', 'Seed', 'Size-1', 'Size-2']
txt2img_fields = [field for field,name in txt2img_paste_fields if name in paste_field_names]
img2img_fields = [field for field,name in img2img_paste_fields if name in paste_field_names]
send_to_img2img.click(
fn=lambda img, *args: (image_from_url_text(img),*args),
_js="(gallery, ...args) => [extract_image_from_gallery_img2img(gallery), ...args]",
inputs=[txt2img_gallery] + txt2img_fields,
outputs=[init_img] + img2img_fields,
)
send_to_inpaint.click(
fn=lambda x, *args: (image_from_url_text(x), *args),
_js="(gallery, ...args) => [extract_image_from_gallery_inpaint(gallery), ...args]",
inputs=[txt2img_gallery] + txt2img_fields,
outputs=[init_img_with_mask] + img2img_fields,
)
img2img_send_to_img2img.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_img2img",
inputs=[img2img_gallery],
outputs=[init_img],
)
img2img_send_to_inpaint.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_inpaint",
inputs=[img2img_gallery],
outputs=[init_img_with_mask],
)
send_to_extras.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_extras",
inputs=[txt2img_gallery],
outputs=[extras_image],
)
img2img_send_to_extras.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_extras",
inputs=[img2img_gallery],
outputs=[extras_image],
)
modules.generation_parameters_copypaste.connect_paste(pnginfo_send_to_txt2img, txt2img_paste_fields, generation_info, 'switch_to_txt2img')
modules.generation_parameters_copypaste.connect_paste(pnginfo_send_to_img2img, img2img_paste_fields, generation_info, 'switch_to_img2img_img2img')
ui_config_file = cmd_opts.ui_config_file
ui_settings = {}
settings_count = len(ui_settings)
error_loading = False
try:
if os.path.exists(ui_config_file):
with open(ui_config_file, "r", encoding="utf8") as file:
ui_settings = json.load(file)
except Exception:
error_loading = True
print("Error loading settings:", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
def loadsave(path, x):
def apply_field(obj, field, condition=None):
key = path + "/" + field
if getattr(obj,'custom_script_source',None) is not None:
key = 'customscript/' + obj.custom_script_source + '/' + key
if getattr(obj, 'do_not_save_to_config', False):
return
saved_value = ui_settings.get(key, None)
if saved_value is None:
ui_settings[key] = getattr(obj, field)
elif condition is None or condition(saved_value):
setattr(obj, field, saved_value)
if type(x) in [gr.Slider, gr.Radio, gr.Checkbox, gr.Textbox, gr.Number] and x.visible:
apply_field(x, 'visible')
if type(x) == gr.Slider:
apply_field(x, 'value')
apply_field(x, 'minimum')
apply_field(x, 'maximum')
apply_field(x, 'step')
if type(x) == gr.Radio:
apply_field(x, 'value', lambda val: val in x.choices)
if type(x) == gr.Checkbox:
apply_field(x, 'value')
if type(x) == gr.Textbox:
apply_field(x, 'value')
if type(x) == gr.Number:
apply_field(x, 'value')
visit(txt2img_interface, loadsave, "txt2img")
visit(img2img_interface, loadsave, "img2img")
visit(extras_interface, loadsave, "extras")
if not error_loading and (not os.path.exists(ui_config_file) or settings_count != len(ui_settings)):
with open(ui_config_file, "w", encoding="utf8") as file:
json.dump(ui_settings, file, indent=4)
return demo
with open(os.path.join(script_path, "script.js"), "r", encoding="utf8") as jsfile:
javascript = f'<script>{jsfile.read()}</script>'
jsdir = os.path.join(script_path, "javascript")
for filename in sorted(os.listdir(jsdir)):
with open(os.path.join(jsdir, filename), "r", encoding="utf8") as jsfile:
javascript += f"\n<script>{jsfile.read()}</script>"
def template_response(*args, **kwargs):
res = gradio_routes_templates_response(*args, **kwargs)
res.body = res.body.replace(b'</head>', f'{javascript}</head>'.encode("utf8"))
res.init_headers()
return res
gradio_routes_templates_response = gradio.routes.templates.TemplateResponse
gradio.routes.templates.TemplateResponse = template_response
<|code_end|>
|
modules/ui.py
<|code_start|>import base64
import html
import io
import json
import math
import mimetypes
import os
import random
import sys
import time
import traceback
import numpy as np
import torch
from PIL import Image, PngImagePlugin
import gradio as gr
import gradio.utils
import gradio.routes
from modules.paths import script_path
from modules.shared import opts, cmd_opts
import modules.shared as shared
from modules.sd_samplers import samplers, samplers_for_img2img
from modules.sd_hijack import model_hijack
import modules.ldsr_model
import modules.scripts
import modules.gfpgan_model
import modules.codeformer_model
import modules.styles
import modules.generation_parameters_copypaste
# this is a fix for Windows users. Without it, javascript files will be served with text/html content-type and the browser will not show any UI
mimetypes.init()
mimetypes.add_type('application/javascript', '.js')
if not cmd_opts.share and not cmd_opts.listen:
# fix gradio phoning home
gradio.utils.version_check = lambda: None
gradio.utils.get_local_ip_address = lambda: '127.0.0.1'
def gr_show(visible=True):
return {"visible": visible, "__type__": "update"}
sample_img2img = "assets/stable-samples/img2img/sketch-mountains-input.jpg"
sample_img2img = sample_img2img if os.path.exists(sample_img2img) else None
css_hide_progressbar = """
.wrap .m-12 svg { display:none!important; }
.wrap .m-12::before { content:"Loading..." }
.progress-bar { display:none!important; }
.meta-text { display:none!important; }
"""
# Using constants for these since the variation selector isn't visible.
# Important that they exactly match script.js for tooltip to work.
random_symbol = '\U0001f3b2\ufe0f' # 🎲️
reuse_symbol = '\u267b\ufe0f' # ♻️
art_symbol = '\U0001f3a8' # 🎨
paste_symbol = '\u2199\ufe0f' # ↙
def plaintext_to_html(text):
text = "<p>" + "<br>\n".join([f"{html.escape(x)}" for x in text.split('\n')]) + "</p>"
return text
def image_from_url_text(filedata):
if type(filedata) == list:
if len(filedata) == 0:
return None
filedata = filedata[0]
if filedata.startswith("data:image/png;base64,"):
filedata = filedata[len("data:image/png;base64,"):]
filedata = base64.decodebytes(filedata.encode('utf-8'))
image = Image.open(io.BytesIO(filedata))
return image
def send_gradio_gallery_to_image(x):
if len(x) == 0:
return None
return image_from_url_text(x[0])
def save_files(js_data, images, index):
import csv
os.makedirs(opts.outdir_save, exist_ok=True)
filenames = []
data = json.loads(js_data)
if index > -1 and opts.save_selected_only and (index >= data["index_of_first_image"]): # ensures we are looking at a specific non-grid picture, and we have save_selected_only
images = [images[index]]
infotexts = [data["infotexts"][index]]
else:
infotexts = data["infotexts"]
with open(os.path.join(opts.outdir_save, "log.csv"), "a", encoding="utf8", newline='') as file:
at_start = file.tell() == 0
writer = csv.writer(file)
if at_start:
writer.writerow(["prompt", "seed", "width", "height", "sampler", "cfgs", "steps", "filename", "negative_prompt"])
filename_base = str(int(time.time() * 1000))
for i, filedata in enumerate(images):
filename = filename_base + ("" if len(images) == 1 else "-" + str(i + 1)) + ".png"
filepath = os.path.join(opts.outdir_save, filename)
if filedata.startswith("data:image/png;base64,"):
filedata = filedata[len("data:image/png;base64,"):]
pnginfo = PngImagePlugin.PngInfo()
pnginfo.add_text('parameters', infotexts[i])
image = Image.open(io.BytesIO(base64.decodebytes(filedata.encode('utf-8'))))
image.save(filepath, quality=opts.jpeg_quality, pnginfo=pnginfo)
filenames.append(filename)
writer.writerow([data["prompt"], data["seed"], data["width"], data["height"], data["sampler"], data["cfg_scale"], data["steps"], filenames[0], data["negative_prompt"]])
return '', '', plaintext_to_html(f"Saved: {filenames[0]}")
def wrap_gradio_call(func):
def f(*args, **kwargs):
run_memmon = opts.memmon_poll_rate > 0 and not shared.mem_mon.disabled
if run_memmon:
shared.mem_mon.monitor()
t = time.perf_counter()
try:
res = list(func(*args, **kwargs))
except Exception as e:
print("Error completing request", file=sys.stderr)
print("Arguments:", args, kwargs, file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
shared.state.job = ""
shared.state.job_count = 0
res = [None, '', f"<div class='error'>{plaintext_to_html(type(e).__name__+': '+str(e))}</div>"]
elapsed = time.perf_counter() - t
if run_memmon:
mem_stats = {k: -(v//-(1024*1024)) for k, v in shared.mem_mon.stop().items()}
active_peak = mem_stats['active_peak']
reserved_peak = mem_stats['reserved_peak']
sys_peak = mem_stats['system_peak']
sys_total = mem_stats['total']
sys_pct = round(sys_peak/max(sys_total, 1) * 100, 2)
vram_html = f"<p class='vram'>Torch active/reserved: {active_peak}/{reserved_peak} MiB, <wbr>Sys VRAM: {sys_peak}/{sys_total} MiB ({sys_pct}%)</p>"
else:
vram_html = ''
# last item is always HTML
res[-1] += f"<div class='performance'><p class='time'>Time taken: <wbr>{elapsed:.2f}s</p>{vram_html}</div>"
shared.state.interrupted = False
return tuple(res)
return f
def check_progress_call(id_part):
if shared.state.job_count == 0:
return "", gr_show(False), gr_show(False)
progress = 0
if shared.state.job_count > 0:
progress += shared.state.job_no / shared.state.job_count
if shared.state.sampling_steps > 0:
progress += 1 / shared.state.job_count * shared.state.sampling_step / shared.state.sampling_steps
progress = min(progress, 1)
progressbar = ""
if opts.show_progressbar:
progressbar = f"""<div class='progressDiv'><div class='progress' style="width:{progress * 100}%">{str(int(progress*100))+"%" if progress > 0.01 else ""}</div></div>"""
image = gr_show(False)
preview_visibility = gr_show(False)
if opts.show_progress_every_n_steps > 0:
if shared.parallel_processing_allowed:
if shared.state.sampling_step - shared.state.current_image_sampling_step >= opts.show_progress_every_n_steps and shared.state.current_latent is not None:
shared.state.current_image = modules.sd_samplers.sample_to_image(shared.state.current_latent)
shared.state.current_image_sampling_step = shared.state.sampling_step
image = shared.state.current_image
if image is None:
image = gr.update(value=None)
else:
preview_visibility = gr_show(True)
return f"<span id='{id_part}_progress_span' style='display: none'>{time.time()}</span><p>{progressbar}</p>", preview_visibility, image
def check_progress_call_initial(id_part):
shared.state.job_count = -1
shared.state.current_latent = None
shared.state.current_image = None
return check_progress_call(id_part)
def roll_artist(prompt):
allowed_cats = set([x for x in shared.artist_db.categories() if len(opts.random_artist_categories)==0 or x in opts.random_artist_categories])
artist = random.choice([x for x in shared.artist_db.artists if x.category in allowed_cats])
return prompt + ", " + artist.name if prompt != '' else artist.name
def visit(x, func, path=""):
if hasattr(x, 'children'):
for c in x.children:
visit(c, func, path)
elif x.label is not None:
func(path + "/" + str(x.label), x)
def add_style(name: str, prompt: str, negative_prompt: str):
if name is None:
return [gr_show(), gr_show()]
style = modules.styles.PromptStyle(name, prompt, negative_prompt)
shared.prompt_styles.styles[style.name] = style
# Save all loaded prompt styles: this allows us to update the storage format in the future more easily, because we
# reserialize all styles every time we save them
shared.prompt_styles.save_styles(shared.styles_filename)
return [gr.Dropdown.update(visible=True, choices=list(shared.prompt_styles.styles)) for _ in range(4)]
def apply_styles(prompt, prompt_neg, style1_name, style2_name):
prompt = shared.prompt_styles.apply_styles_to_prompt(prompt, [style1_name, style2_name])
prompt_neg = shared.prompt_styles.apply_negative_styles_to_prompt(prompt_neg, [style1_name, style2_name])
return [gr.Textbox.update(value=prompt), gr.Textbox.update(value=prompt_neg), gr.Dropdown.update(value="None"), gr.Dropdown.update(value="None")]
def interrogate(image):
prompt = shared.interrogator.interrogate(image)
return gr_show(True) if prompt is None else prompt
def create_seed_inputs():
with gr.Row():
with gr.Box():
with gr.Row(elem_id='seed_row'):
seed = (gr.Textbox if cmd_opts.use_textbox_seed else gr.Number)(label='Seed', value=-1)
seed.style(container=False)
random_seed = gr.Button(random_symbol, elem_id='random_seed')
reuse_seed = gr.Button(reuse_symbol, elem_id='reuse_seed')
with gr.Box(elem_id='subseed_show_box'):
seed_checkbox = gr.Checkbox(label='Extra', elem_id='subseed_show', value=False)
# Components to show/hide based on the 'Extra' checkbox
seed_extras = []
with gr.Row(visible=False) as seed_extra_row_1:
seed_extras.append(seed_extra_row_1)
with gr.Box():
with gr.Row(elem_id='subseed_row'):
subseed = gr.Number(label='Variation seed', value=-1)
subseed.style(container=False)
random_subseed = gr.Button(random_symbol, elem_id='random_subseed')
reuse_subseed = gr.Button(reuse_symbol, elem_id='reuse_subseed')
subseed_strength = gr.Slider(label='Variation strength', value=0.0, minimum=0, maximum=1, step=0.01)
with gr.Row(visible=False) as seed_extra_row_2:
seed_extras.append(seed_extra_row_2)
seed_resize_from_w = gr.Slider(minimum=0, maximum=2048, step=64, label="Resize seed from width", value=0)
seed_resize_from_h = gr.Slider(minimum=0, maximum=2048, step=64, label="Resize seed from height", value=0)
random_seed.click(fn=lambda: -1, show_progress=False, inputs=[], outputs=[seed])
random_subseed.click(fn=lambda: -1, show_progress=False, inputs=[], outputs=[subseed])
def change_visibility(show):
return {comp: gr_show(show) for comp in seed_extras}
seed_checkbox.change(change_visibility, show_progress=False, inputs=[seed_checkbox], outputs=seed_extras)
return seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox
def connect_reuse_seed(seed: gr.Number, reuse_seed: gr.Button, generation_info: gr.Textbox, dummy_component, is_subseed):
""" Connects a 'reuse (sub)seed' button's click event so that it copies last used
(sub)seed value from generation info to the seed field. If copying subseed and subseed strength
was 0, i.e. no variation seed was used, it copies the normal seed value instead."""
def copy_seed(gen_info_string: str, index):
res = -1
try:
gen_info = json.loads(gen_info_string)
index -= gen_info.get('index_of_first_image', 0)
if is_subseed and gen_info.get('subseed_strength', 0) > 0:
all_subseeds = gen_info.get('all_subseeds', [-1])
res = all_subseeds[index if 0 <= index < len(all_subseeds) else 0]
else:
all_seeds = gen_info.get('all_seeds', [-1])
res = all_seeds[index if 0 <= index < len(all_seeds) else 0]
except json.decoder.JSONDecodeError as e:
if gen_info_string != '':
print("Error parsing JSON generation info:", file=sys.stderr)
print(gen_info_string, file=sys.stderr)
return [res, gr_show(False)]
reuse_seed.click(
fn=copy_seed,
_js="(x, y) => [x, selected_gallery_index()]",
show_progress=False,
inputs=[generation_info, dummy_component],
outputs=[seed, dummy_component]
)
def update_token_counter(text):
tokens, token_count, max_length = model_hijack.tokenize(text)
style_class = ' class="red"' if (token_count > max_length) else ""
return f"<span {style_class}>{token_count}/{max_length}</span>"
def create_toprow(is_img2img):
id_part = "img2img" if is_img2img else "txt2img"
with gr.Row(elem_id="toprow"):
with gr.Column(scale=4):
with gr.Row():
with gr.Column(scale=80):
with gr.Row():
prompt = gr.Textbox(label="Prompt", elem_id=f"{id_part}_prompt", show_label=False, placeholder="Prompt", lines=2)
with gr.Column(scale=1, elem_id="roll_col"):
roll = gr.Button(value=art_symbol, elem_id="roll", visible=len(shared.artist_db.artists) > 0)
paste = gr.Button(value=paste_symbol, elem_id="paste")
token_counter = gr.HTML(value="<span></span>", elem_id=f"{id_part}_token_counter")
hidden_button = gr.Button(visible=False, elem_id=f"{id_part}_token_button")
hidden_button.click(fn=update_token_counter, inputs=[prompt], outputs=[token_counter])
with gr.Column(scale=10, elem_id="style_pos_col"):
prompt_style = gr.Dropdown(label="Style 1", elem_id=f"{id_part}_style_index", choices=[k for k, v in shared.prompt_styles.styles.items()], value=next(iter(shared.prompt_styles.styles.keys())), visible=len(shared.prompt_styles.styles) > 1)
with gr.Row():
with gr.Column(scale=8):
negative_prompt = gr.Textbox(label="Negative prompt", elem_id="negative_prompt", show_label=False, placeholder="Negative prompt", lines=2)
with gr.Column(scale=1, elem_id="style_neg_col"):
prompt_style2 = gr.Dropdown(label="Style 2", elem_id=f"{id_part}_style2_index", choices=[k for k, v in shared.prompt_styles.styles.items()], value=next(iter(shared.prompt_styles.styles.keys())), visible=len(shared.prompt_styles.styles) > 1)
with gr.Column(scale=1):
with gr.Row():
interrupt = gr.Button('Interrupt', elem_id=f"{id_part}_interrupt")
submit = gr.Button('Generate', elem_id="generate", variant='primary')
interrupt.click(
fn=lambda: shared.state.interrupt(),
inputs=[],
outputs=[],
)
with gr.Row():
if is_img2img:
interrogate = gr.Button('Interrogate', elem_id="interrogate")
else:
interrogate = None
prompt_style_apply = gr.Button('Apply style', elem_id="style_apply")
save_style = gr.Button('Create style', elem_id="style_create")
return prompt, roll, prompt_style, negative_prompt, prompt_style2, submit, interrogate, prompt_style_apply, save_style, paste
def setup_progressbar(progressbar, preview, id_part):
check_progress = gr.Button('Check progress', elem_id=f"{id_part}_check_progress", visible=False)
check_progress.click(
fn=lambda: check_progress_call(id_part),
show_progress=False,
inputs=[],
outputs=[progressbar, preview, preview],
)
check_progress_initial = gr.Button('Check progress (first)', elem_id=f"{id_part}_check_progress_initial", visible=False)
check_progress_initial.click(
fn=lambda: check_progress_call_initial(id_part),
show_progress=False,
inputs=[],
outputs=[progressbar, preview, preview],
)
def create_ui(txt2img, img2img, run_extras, run_pnginfo, run_modelmerger):
with gr.Blocks(analytics_enabled=False) as txt2img_interface:
txt2img_prompt, roll, txt2img_prompt_style, txt2img_negative_prompt, txt2img_prompt_style2, submit, _, txt2img_prompt_style_apply, txt2img_save_style, paste = create_toprow(is_img2img=False)
dummy_component = gr.Label(visible=False)
with gr.Row(elem_id='txt2img_progress_row'):
with gr.Column(scale=1):
pass
with gr.Column(scale=1):
progressbar = gr.HTML(elem_id="txt2img_progressbar")
txt2img_preview = gr.Image(elem_id='txt2img_preview', visible=False)
setup_progressbar(progressbar, txt2img_preview, 'txt2img')
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
steps = gr.Slider(minimum=1, maximum=150, step=1, label="Sampling Steps", value=20)
sampler_index = gr.Radio(label='Sampling method', elem_id="txt2img_sampling", choices=[x.name for x in samplers], value=samplers[0].name, type="index")
with gr.Group():
width = gr.Slider(minimum=64, maximum=2048, step=64, label="Width", value=512)
height = gr.Slider(minimum=64, maximum=2048, step=64, label="Height", value=512)
with gr.Row():
restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1)
tiling = gr.Checkbox(label='Tiling', value=False)
enable_hr = gr.Checkbox(label='Highres. fix', value=False)
with gr.Row(visible=False) as hr_options:
scale_latent = gr.Checkbox(label='Scale latent', value=False)
denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7)
with gr.Row():
batch_count = gr.Slider(minimum=1, maximum=cmd_opts.max_batch_count, step=1, label='Batch count', value=1)
batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1)
cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0)
seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs()
with gr.Group():
custom_inputs = modules.scripts.scripts_txt2img.setup_ui(is_img2img=False)
with gr.Column(variant='panel'):
with gr.Group():
txt2img_preview = gr.Image(elem_id='txt2img_preview', visible=False)
txt2img_gallery = gr.Gallery(label='Output', show_label=False, elem_id='txt2img_gallery').style(grid=4)
with gr.Group():
with gr.Row():
save = gr.Button('Save')
send_to_img2img = gr.Button('Send to img2img')
send_to_inpaint = gr.Button('Send to inpaint')
send_to_extras = gr.Button('Send to extras')
with gr.Group():
html_info = gr.HTML()
generation_info = gr.Textbox(visible=False)
connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False)
connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True)
txt2img_args = dict(
fn=txt2img,
_js="submit",
inputs=[
txt2img_prompt,
txt2img_negative_prompt,
txt2img_prompt_style,
txt2img_prompt_style2,
steps,
sampler_index,
restore_faces,
tiling,
batch_count,
batch_size,
cfg_scale,
seed,
subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox,
height,
width,
enable_hr,
scale_latent,
denoising_strength,
] + custom_inputs,
outputs=[
txt2img_gallery,
generation_info,
html_info
],
show_progress=False,
)
txt2img_prompt.submit(**txt2img_args)
submit.click(**txt2img_args)
enable_hr.change(
fn=lambda x: gr_show(x),
inputs=[enable_hr],
outputs=[hr_options],
)
save.click(
fn=wrap_gradio_call(save_files),
_js="(x, y, z) => [x, y, selected_gallery_index()]",
inputs=[
generation_info,
txt2img_gallery,
html_info,
],
outputs=[
html_info,
html_info,
html_info,
]
)
roll.click(
fn=roll_artist,
inputs=[
txt2img_prompt,
],
outputs=[
txt2img_prompt,
]
)
txt2img_paste_fields = [
(txt2img_prompt, "Prompt"),
(txt2img_negative_prompt, "Negative prompt"),
(steps, "Steps"),
(sampler_index, "Sampler"),
(restore_faces, "Face restoration"),
(cfg_scale, "CFG scale"),
(seed, "Seed"),
(width, "Size-1"),
(height, "Size-2"),
(batch_size, "Batch size"),
(subseed, "Variation seed"),
(subseed_strength, "Variation seed strength"),
(seed_resize_from_w, "Seed resize from-1"),
(seed_resize_from_h, "Seed resize from-2"),
(denoising_strength, "Denoising strength"),
(enable_hr, lambda d: "Denoising strength" in d),
(hr_options, lambda d: gr.Row.update(visible="Denoising strength" in d)),
]
modules.generation_parameters_copypaste.connect_paste(paste, txt2img_paste_fields, txt2img_prompt)
with gr.Blocks(analytics_enabled=False) as img2img_interface:
img2img_prompt, roll, img2img_prompt_style, img2img_negative_prompt, img2img_prompt_style2, submit, img2img_interrogate, img2img_prompt_style_apply, img2img_save_style, paste = create_toprow(is_img2img=True)
with gr.Row(elem_id='img2img_progress_row'):
with gr.Column(scale=1):
pass
with gr.Column(scale=1):
progressbar = gr.HTML(elem_id="img2img_progressbar")
img2img_preview = gr.Image(elem_id='img2img_preview', visible=False)
setup_progressbar(progressbar, img2img_preview, 'img2img')
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
with gr.Tabs(elem_id="mode_img2img") as tabs_img2img_mode:
with gr.TabItem('img2img', id='img2img'):
init_img = gr.Image(label="Image for img2img", show_label=False, source="upload", interactive=True, type="pil")
with gr.TabItem('Inpaint', id='inpaint'):
init_img_with_mask = gr.Image(label="Image for inpainting with mask", show_label=False, elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool="sketch", image_mode="RGBA")
init_img_inpaint = gr.Image(label="Image for img2img", show_label=False, source="upload", interactive=True, type="pil", visible=False, elem_id="img_inpaint_base")
init_mask_inpaint = gr.Image(label="Mask", source="upload", interactive=True, type="pil", visible=False, elem_id="img_inpaint_mask")
mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4)
with gr.Row():
mask_mode = gr.Radio(label="Mask mode", show_label=False, choices=["Draw mask", "Upload mask"], type="index", value="Draw mask", elem_id="mask_mode")
inpainting_mask_invert = gr.Radio(label='Masking mode', show_label=False, choices=['Inpaint masked', 'Inpaint not masked'], value='Inpaint masked', type="index")
inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='fill', type="index")
with gr.Row():
inpaint_full_res = gr.Checkbox(label='Inpaint at full resolution', value=False)
inpaint_full_res_padding = gr.Slider(label='Inpaint at full resolution padding, pixels', minimum=0, maximum=256, step=4, value=32)
with gr.TabItem('Batch img2img', id='batch'):
hidden = '<br>Disabled when launched with --hide-ui-dir-config.' if shared.cmd_opts.hide_ui_dir_config else ''
gr.HTML(f"<p class=\"text-gray-500\">Process images in a directory on the same machine where the server is running.{hidden}</p>")
img2img_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs)
img2img_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs)
with gr.Row():
resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", show_label=False, choices=["Just resize", "Crop and resize", "Resize and fill"], type="index", value="Just resize")
steps = gr.Slider(minimum=1, maximum=150, step=1, label="Sampling Steps", value=20)
sampler_index = gr.Radio(label='Sampling method', choices=[x.name for x in samplers_for_img2img], value=samplers_for_img2img[0].name, type="index")
with gr.Group():
width = gr.Slider(minimum=64, maximum=2048, step=64, label="Width", value=512)
height = gr.Slider(minimum=64, maximum=2048, step=64, label="Height", value=512)
with gr.Row():
restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1)
tiling = gr.Checkbox(label='Tiling', value=False)
with gr.Row():
batch_count = gr.Slider(minimum=1, maximum=cmd_opts.max_batch_count, step=1, label='Batch count', value=1)
batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1)
with gr.Group():
cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0)
denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75)
seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs()
with gr.Group():
custom_inputs = modules.scripts.scripts_img2img.setup_ui(is_img2img=True)
with gr.Column(variant='panel'):
with gr.Group():
img2img_preview = gr.Image(elem_id='img2img_preview', visible=False)
img2img_gallery = gr.Gallery(label='Output', show_label=False, elem_id='img2img_gallery').style(grid=4)
with gr.Group():
with gr.Row():
save = gr.Button('Save')
img2img_send_to_img2img = gr.Button('Send to img2img')
img2img_send_to_inpaint = gr.Button('Send to inpaint')
img2img_send_to_extras = gr.Button('Send to extras')
with gr.Group():
html_info = gr.HTML()
generation_info = gr.Textbox(visible=False)
connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False)
connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True)
mask_mode.change(
lambda mode, img: {
init_img_with_mask: gr_show(mode == 0),
init_img_inpaint: gr_show(mode == 1),
init_mask_inpaint: gr_show(mode == 1),
},
inputs=[mask_mode, init_img_with_mask],
outputs=[
init_img_with_mask,
init_img_inpaint,
init_mask_inpaint,
],
)
img2img_args = dict(
fn=img2img,
_js="submit_img2img",
inputs=[
dummy_component,
img2img_prompt,
img2img_negative_prompt,
img2img_prompt_style,
img2img_prompt_style2,
init_img,
init_img_with_mask,
init_img_inpaint,
init_mask_inpaint,
mask_mode,
steps,
sampler_index,
mask_blur,
inpainting_fill,
restore_faces,
tiling,
batch_count,
batch_size,
cfg_scale,
denoising_strength,
seed,
subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox,
height,
width,
resize_mode,
inpaint_full_res,
inpaint_full_res_padding,
inpainting_mask_invert,
img2img_batch_input_dir,
img2img_batch_output_dir,
] + custom_inputs,
outputs=[
img2img_gallery,
generation_info,
html_info
],
show_progress=False,
)
img2img_prompt.submit(**img2img_args)
submit.click(**img2img_args)
img2img_interrogate.click(
fn=interrogate,
inputs=[init_img],
outputs=[img2img_prompt],
)
save.click(
fn=wrap_gradio_call(save_files),
_js="(x, y, z) => [x, y, selected_gallery_index()]",
inputs=[
generation_info,
img2img_gallery,
html_info
],
outputs=[
html_info,
html_info,
html_info,
]
)
roll.click(
fn=roll_artist,
inputs=[
img2img_prompt,
],
outputs=[
img2img_prompt,
]
)
prompts = [(txt2img_prompt, txt2img_negative_prompt), (img2img_prompt, img2img_negative_prompt)]
style_dropdowns = [(txt2img_prompt_style, txt2img_prompt_style2), (img2img_prompt_style, img2img_prompt_style2)]
for button, (prompt, negative_prompt) in zip([txt2img_save_style, img2img_save_style], prompts):
button.click(
fn=add_style,
_js="ask_for_style_name",
# Have to pass an empty dummy component here, because the JavaScript and Python functions have to accept
# the same number of parameters, but we only know the style name after the JavaScript prompt
inputs=[dummy_component, prompt, negative_prompt],
outputs=[txt2img_prompt_style, img2img_prompt_style, txt2img_prompt_style2, img2img_prompt_style2],
)
for button, (prompt, negative_prompt), (style1, style2) in zip([txt2img_prompt_style_apply, img2img_prompt_style_apply], prompts, style_dropdowns):
button.click(
fn=apply_styles,
inputs=[prompt, negative_prompt, style1, style2],
outputs=[prompt, negative_prompt, style1, style2],
)
img2img_paste_fields = [
(img2img_prompt, "Prompt"),
(img2img_negative_prompt, "Negative prompt"),
(steps, "Steps"),
(sampler_index, "Sampler"),
(restore_faces, "Face restoration"),
(cfg_scale, "CFG scale"),
(seed, "Seed"),
(width, "Size-1"),
(height, "Size-2"),
(batch_size, "Batch size"),
(subseed, "Variation seed"),
(subseed_strength, "Variation seed strength"),
(seed_resize_from_w, "Seed resize from-1"),
(seed_resize_from_h, "Seed resize from-2"),
(denoising_strength, "Denoising strength"),
]
modules.generation_parameters_copypaste.connect_paste(paste, img2img_paste_fields, img2img_prompt)
with gr.Blocks(analytics_enabled=False) as extras_interface:
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
with gr.Tabs(elem_id="mode_extras"):
with gr.TabItem('Single Image'):
extras_image = gr.Image(label="Source", source="upload", interactive=True, type="pil")
with gr.TabItem('Batch Process'):
image_batch = gr.File(label="Batch Process", file_count="multiple", interactive=True, type="file")
upscaling_resize = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Resize", value=2)
with gr.Group():
extras_upscaler_1 = gr.Radio(label='Upscaler 1', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index")
with gr.Group():
extras_upscaler_2 = gr.Radio(label='Upscaler 2', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index")
extras_upscaler_2_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="Upscaler 2 visibility", value=1)
with gr.Group():
gfpgan_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="GFPGAN visibility", value=0, interactive=modules.gfpgan_model.have_gfpgan)
with gr.Group():
codeformer_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="CodeFormer visibility", value=0, interactive=modules.codeformer_model.have_codeformer)
codeformer_weight = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="CodeFormer weight (0 = maximum effect, 1 = minimum effect)", value=0, interactive=modules.codeformer_model.have_codeformer)
submit = gr.Button('Generate', elem_id="extras_generate", variant='primary')
with gr.Column(variant='panel'):
result_images = gr.Gallery(label="Result", show_label=False)
html_info_x = gr.HTML()
html_info = gr.HTML()
extras_send_to_img2img = gr.Button('Send to img2img')
extras_send_to_inpaint = gr.Button('Send to inpaint')
submit.click(
fn=run_extras,
_js="get_extras_tab_index",
inputs=[
dummy_component,
extras_image,
image_batch,
gfpgan_visibility,
codeformer_visibility,
codeformer_weight,
upscaling_resize,
extras_upscaler_1,
extras_upscaler_2,
extras_upscaler_2_visibility,
],
outputs=[
result_images,
html_info_x,
html_info,
]
)
extras_send_to_img2img.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_img2img",
inputs=[result_images],
outputs=[init_img],
)
extras_send_to_inpaint.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_img2img",
inputs=[result_images],
outputs=[init_img_with_mask],
)
with gr.Blocks(analytics_enabled=False) as pnginfo_interface:
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
image = gr.Image(elem_id="pnginfo_image", label="Source", source="upload", interactive=True, type="pil")
with gr.Column(variant='panel'):
html = gr.HTML()
generation_info = gr.Textbox(visible=False)
html2 = gr.HTML()
with gr.Row():
pnginfo_send_to_txt2img = gr.Button('Send to txt2img')
pnginfo_send_to_img2img = gr.Button('Send to img2img')
image.change(
fn=wrap_gradio_call(run_pnginfo),
inputs=[image],
outputs=[html, generation_info, html2],
)
with gr.Blocks() as modelmerger_interface:
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
gr.HTML(value="<p>A merger of the two checkpoints will be generated in your <b>checkpoint</b> directory.</p>")
with gr.Row():
ckpt_name_list = sorted([x.title for x in modules.sd_models.checkpoints_list.values()])
primary_model_name = gr.Dropdown(ckpt_name_list, elem_id="modelmerger_primary_model_name", label="Primary Model Name")
secondary_model_name = gr.Dropdown(ckpt_name_list, elem_id="modelmerger_secondary_model_name", label="Secondary Model Name")
interp_amount = gr.Slider(minimum=0.0, maximum=1.0, step=0.05, label='Interpolation Amount', value=0.3)
interp_method = gr.Radio(choices=["Weighted Sum", "Sigmoid", "Inverse Sigmoid"], value="Weighted Sum", label="Interpolation Method")
submit = gr.Button(elem_id="modelmerger_merge", label="Merge", variant='primary')
with gr.Column(variant='panel'):
submit_result = gr.Textbox(elem_id="modelmerger_result", show_label=False)
submit.click(
fn=run_modelmerger,
inputs=[
primary_model_name,
secondary_model_name,
interp_method,
interp_amount
],
outputs=[
submit_result,
]
)
def create_setting_component(key):
def fun():
return opts.data[key] if key in opts.data else opts.data_labels[key].default
info = opts.data_labels[key]
t = type(info.default)
args = info.component_args() if callable(info.component_args) else info.component_args
if info.component is not None:
comp = info.component
elif t == str:
comp = gr.Textbox
elif t == int:
comp = gr.Number
elif t == bool:
comp = gr.Checkbox
else:
raise Exception(f'bad options item type: {str(t)} for key {key}')
return comp(label=info.label, value=fun, **(args or {}))
components = []
def run_settings(*args):
changed = 0
for key, value, comp in zip(opts.data_labels.keys(), args, components):
if not opts.same_type(value, opts.data_labels[key].default):
return f"Bad value for setting {key}: {value}; expecting {type(opts.data_labels[key].default).__name__}"
for key, value, comp in zip(opts.data_labels.keys(), args, components):
comp_args = opts.data_labels[key].component_args
if comp_args and isinstance(comp_args, dict) and comp_args.get('visible') is False:
continue
oldval = opts.data.get(key, None)
opts.data[key] = value
if oldval != value:
if opts.data_labels[key].onchange is not None:
opts.data_labels[key].onchange()
changed += 1
opts.save(shared.config_filename)
return f'{changed} settings changed.', opts.dumpjson()
with gr.Blocks(analytics_enabled=False) as settings_interface:
settings_submit = gr.Button(value="Apply settings", variant='primary')
result = gr.HTML()
settings_cols = 3
items_per_col = int(len(opts.data_labels) * 0.9 / settings_cols)
cols_displayed = 0
items_displayed = 0
previous_section = None
column = None
with gr.Row(elem_id="settings").style(equal_height=False):
for i, (k, item) in enumerate(opts.data_labels.items()):
if previous_section != item.section:
if cols_displayed < settings_cols and (items_displayed >= items_per_col or previous_section is None):
if column is not None:
column.__exit__()
column = gr.Column(variant='panel')
column.__enter__()
items_displayed = 0
cols_displayed += 1
previous_section = item.section
gr.HTML(elem_id="settings_header_text_{}".format(item.section[0]), value='<h1 class="gr-button-lg">{}</h1>'.format(item.section[1]))
components.append(create_setting_component(k))
items_displayed += 1
request_notifications = gr.Button(value='Request browser notifications', elem_id="request_notifications")
request_notifications.click(
fn=lambda: None,
inputs=[],
outputs=[],
_js='function(){}'
)
if column is not None:
column.__exit__()
interfaces = [
(txt2img_interface, "txt2img", "txt2img"),
(img2img_interface, "img2img", "img2img"),
(extras_interface, "Extras", "extras"),
(pnginfo_interface, "PNG Info", "pnginfo"),
(modelmerger_interface, "Checkpoint Merger", "modelmerger"),
(settings_interface, "Settings", "settings"),
]
with open(os.path.join(script_path, "style.css"), "r", encoding="utf8") as file:
css = file.read()
if os.path.exists(os.path.join(script_path, "user.css")):
with open(os.path.join(script_path, "user.css"), "r", encoding="utf8") as file:
usercss = file.read()
css += usercss
if not cmd_opts.no_progressbar_hiding:
css += css_hide_progressbar
with gr.Blocks(css=css, analytics_enabled=False, title="Stable Diffusion") as demo:
with gr.Tabs() as tabs:
for interface, label, ifid in interfaces:
with gr.TabItem(label, id=ifid):
interface.render()
if os.path.exists(os.path.join(script_path, "notification.mp3")):
audio_notification = gr.Audio(interactive=False, value=os.path.join(script_path, "notification.mp3"), elem_id="audio_notification", visible=False)
text_settings = gr.Textbox(elem_id="settings_json", value=lambda: opts.dumpjson(), visible=False)
settings_submit.click(
fn=run_settings,
inputs=components,
outputs=[result, text_settings],
)
paste_field_names = ['Prompt', 'Negative prompt', 'Steps', 'Face restoration', 'Seed', 'Size-1', 'Size-2']
txt2img_fields = [field for field,name in txt2img_paste_fields if name in paste_field_names]
img2img_fields = [field for field,name in img2img_paste_fields if name in paste_field_names]
send_to_img2img.click(
fn=lambda img, *args: (image_from_url_text(img),*args),
_js="(gallery, ...args) => [extract_image_from_gallery_img2img(gallery), ...args]",
inputs=[txt2img_gallery] + txt2img_fields,
outputs=[init_img] + img2img_fields,
)
send_to_inpaint.click(
fn=lambda x, *args: (image_from_url_text(x), *args),
_js="(gallery, ...args) => [extract_image_from_gallery_inpaint(gallery), ...args]",
inputs=[txt2img_gallery] + txt2img_fields,
outputs=[init_img_with_mask] + img2img_fields,
)
img2img_send_to_img2img.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_img2img",
inputs=[img2img_gallery],
outputs=[init_img],
)
img2img_send_to_inpaint.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_inpaint",
inputs=[img2img_gallery],
outputs=[init_img_with_mask],
)
send_to_extras.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_extras",
inputs=[txt2img_gallery],
outputs=[extras_image],
)
img2img_send_to_extras.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_extras",
inputs=[img2img_gallery],
outputs=[extras_image],
)
modules.generation_parameters_copypaste.connect_paste(pnginfo_send_to_txt2img, txt2img_paste_fields, generation_info, 'switch_to_txt2img')
modules.generation_parameters_copypaste.connect_paste(pnginfo_send_to_img2img, img2img_paste_fields, generation_info, 'switch_to_img2img_img2img')
ui_config_file = cmd_opts.ui_config_file
ui_settings = {}
settings_count = len(ui_settings)
error_loading = False
try:
if os.path.exists(ui_config_file):
with open(ui_config_file, "r", encoding="utf8") as file:
ui_settings = json.load(file)
except Exception:
error_loading = True
print("Error loading settings:", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
def loadsave(path, x):
def apply_field(obj, field, condition=None):
key = path + "/" + field
if getattr(obj,'custom_script_source',None) is not None:
key = 'customscript/' + obj.custom_script_source + '/' + key
if getattr(obj, 'do_not_save_to_config', False):
return
saved_value = ui_settings.get(key, None)
if saved_value is None:
ui_settings[key] = getattr(obj, field)
elif condition is None or condition(saved_value):
setattr(obj, field, saved_value)
if type(x) in [gr.Slider, gr.Radio, gr.Checkbox, gr.Textbox, gr.Number] and x.visible:
apply_field(x, 'visible')
if type(x) == gr.Slider:
apply_field(x, 'value')
apply_field(x, 'minimum')
apply_field(x, 'maximum')
apply_field(x, 'step')
if type(x) == gr.Radio:
apply_field(x, 'value', lambda val: val in x.choices)
if type(x) == gr.Checkbox:
apply_field(x, 'value')
if type(x) == gr.Textbox:
apply_field(x, 'value')
if type(x) == gr.Number:
apply_field(x, 'value')
visit(txt2img_interface, loadsave, "txt2img")
visit(img2img_interface, loadsave, "img2img")
visit(extras_interface, loadsave, "extras")
if not error_loading and (not os.path.exists(ui_config_file) or settings_count != len(ui_settings)):
with open(ui_config_file, "w", encoding="utf8") as file:
json.dump(ui_settings, file, indent=4)
return demo
with open(os.path.join(script_path, "script.js"), "r", encoding="utf8") as jsfile:
javascript = f'<script>{jsfile.read()}</script>'
jsdir = os.path.join(script_path, "javascript")
for filename in sorted(os.listdir(jsdir)):
with open(os.path.join(jsdir, filename), "r", encoding="utf8") as jsfile:
javascript += f"\n<script>{jsfile.read()}</script>"
def template_response(*args, **kwargs):
res = gradio_routes_templates_response(*args, **kwargs)
res.body = res.body.replace(b'</head>', f'{javascript}</head>'.encode("utf8"))
res.init_headers()
return res
gradio_routes_templates_response = gradio.routes.templates.TemplateResponse
gradio.routes.templates.TemplateResponse = template_response
<|code_end|>
|
X/Y plot does not switch between models whose names start with the same phrase
**Describe the bug**
X/Y plot does not switch between models whose names start with the same phrase.
I have 3 models named:
model-wd.ckpt
model-wd-pruned.ckpt
model-wd-pruned-model-cn-poster-merged.ckpt
I can manually switch between them in settings, but when I use the X/Y plot script it always uses the model with the longest name (model-wd-pruned-model-cn-poster-merged.ckpt); a minimal sketch of how this can happen is given at the end of this report.
**To Reproduce**
Steps to reproduce the behavior:
1. Have several models whose names start with the same phrase in the models folder
2. Start WebUI
3. Select "X/Y Plot" as a script
4. Select "Checkpoint name" as the X or Y type and enter the checkpoint names
5. Press "Generate"
6. See the same picture for all models whose names start with the same phrase
**Expected behavior**
The X/Y Plot script should switch between all of the models provided in the "Checkpoint name" field.
**Screenshots**
Settings I use for X/Y Plot:

For some reason it loads other checkpoints after generating all images:

Result:

Same settings, but switching models manually
model-wd:

model-wd-pruned:

model-wd-pruned-model-cn-poster-merged:

**Desktop:**
- OS: Windows
- Browser: Chrome
- Commit revision: f2a4a2c3a672e22f088a7455d6039557370dd3f2
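For illustration only, here is a minimal, hypothetical sketch (not the repository's actual lookup code) of how a prefix-based checkpoint lookup ends up resolving every shared-prefix name to the longest matching title, and how matching the name exactly avoids that. The `name.ckpt [hash]` title format mirrors the one built by `modeltitle` in modules/sd_models.py; the two lookup functions are assumptions made up for this sketch.

```python
# Hypothetical sketch -- not the webui's actual lookup code. It assumes a
# checkpoint is selected by taking a title that merely starts with the
# requested name, which is one way the reported behaviour could arise.

def find_checkpoint_prefix(name, titles):
    """Buggy lookup: any title sharing the prefix matches; the longest one wins."""
    matches = [t for t in titles if t.startswith(name)]
    return max(matches, key=len) if matches else None

def find_checkpoint_exact(name, titles):
    """Exact lookup: only the title whose filename part equals the requested name."""
    for t in titles:
        if t.split(" [")[0] == name:
            return t
    return None

titles = [
    "model-wd.ckpt [aaaaaaaa]",
    "model-wd-pruned.ckpt [bbbbbbbb]",
    "model-wd-pruned-model-cn-poster-merged.ckpt [cccccccc]",
]

print(find_checkpoint_prefix("model-wd", titles))
# -> model-wd-pruned-model-cn-poster-merged.ckpt [cccccccc]  (wrong model)
print(find_checkpoint_exact("model-wd.ckpt", titles))
# -> model-wd.ckpt [aaaaaaaa]  (intended model)
```

If the script matched checkpoints in the prefix-based way, entering `model-wd` would always resolve to the merged checkpoint, which would explain the identical images in the result grid above.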
|
modules/sd_models.py
<|code_start|>import glob
import os.path
import sys
from collections import namedtuple
import torch
from omegaconf import OmegaConf
from ldm.util import instantiate_from_config
from modules import shared
CheckpointInfo = namedtuple("CheckpointInfo", ['filename', 'title', 'hash', 'model_name'])
checkpoints_list = {}
try:
# this silences the annoying "Some weights of the model checkpoint were not used when initializing..." message at start.
from transformers import logging
logging.set_verbosity_error()
except Exception:
pass
def list_models():
checkpoints_list.clear()
model_dir = os.path.abspath(shared.cmd_opts.ckpt_dir)
def modeltitle(path, h):
abspath = os.path.abspath(path)
if abspath.startswith(model_dir):
name = abspath.replace(model_dir, '')
else:
name = os.path.basename(path)
if name.startswith("\\") or name.startswith("/"):
name = name[1:]
return f'{name} [{h}]'
cmd_ckpt = shared.cmd_opts.ckpt
if os.path.exists(cmd_ckpt):
h = model_hash(cmd_ckpt)
title = modeltitle(cmd_ckpt, h)
model_name = title.rsplit(".",1)[0] # remove extension if present
checkpoints_list[title] = CheckpointInfo(cmd_ckpt, title, h, model_name)
elif cmd_ckpt is not None and cmd_ckpt != shared.default_sd_model_file:
print(f"Checkpoint in --ckpt argument not found: {cmd_ckpt}", file=sys.stderr)
if os.path.exists(model_dir):
for filename in glob.glob(model_dir + '/**/*.ckpt', recursive=True):
h = model_hash(filename)
title = modeltitle(filename, h)
model_name = title.rsplit(".",1)[0] # remove extension if present
checkpoints_list[title] = CheckpointInfo(filename, title, h, model_name)
def model_hash(filename):
try:
with open(filename, "rb") as file:
import hashlib
m = hashlib.sha256()
file.seek(0x100000)
m.update(file.read(0x10000))
return m.hexdigest()[0:8]
except FileNotFoundError:
return 'NOFILE'
def select_checkpoint():
model_checkpoint = shared.opts.sd_model_checkpoint
checkpoint_info = checkpoints_list.get(model_checkpoint, None)
if checkpoint_info is not None:
return checkpoint_info
if len(checkpoints_list) == 0:
print(f"No checkpoints found. When searching for checkpoints, looked at:", file=sys.stderr)
print(f" - file {os.path.abspath(shared.cmd_opts.ckpt)}", file=sys.stderr)
print(f" - directory {os.path.abspath(shared.cmd_opts.ckpt_dir)}", file=sys.stderr)
print(f"Can't run without a checkpoint. Find and place a .ckpt file into any of those locations. The program will exit.", file=sys.stderr)
exit(1)
checkpoint_info = next(iter(checkpoints_list.values()))
if model_checkpoint is not None:
print(f"Checkpoint {model_checkpoint} not found; loading fallback {checkpoint_info.title}", file=sys.stderr)
return checkpoint_info
def load_model_weights(model, checkpoint_file, sd_model_hash):
print(f"Loading weights [{sd_model_hash}] from {checkpoint_file}")
pl_sd = torch.load(checkpoint_file, map_location="cpu")
if "global_step" in pl_sd:
print(f"Global Step: {pl_sd['global_step']}")
sd = pl_sd["state_dict"]
model.load_state_dict(sd, strict=False)
if shared.cmd_opts.opt_channelslast:
model.to(memory_format=torch.channels_last)
if not shared.cmd_opts.no_half:
model.half()
model.sd_model_hash = sd_model_hash
model.sd_model_checkpint = checkpoint_file
def load_model():
from modules import lowvram, sd_hijack
checkpoint_info = select_checkpoint()
sd_config = OmegaConf.load(shared.cmd_opts.config)
sd_model = instantiate_from_config(sd_config.model)
load_model_weights(sd_model, checkpoint_info.filename, checkpoint_info.hash)
if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
lowvram.setup_for_low_vram(sd_model, shared.cmd_opts.medvram)
else:
sd_model.to(shared.device)
sd_hijack.model_hijack.hijack(sd_model)
sd_model.eval()
print(f"Model loaded.")
return sd_model
def reload_model_weights(sd_model, info=None):
from modules import lowvram, devices
checkpoint_info = info or select_checkpoint()
if sd_model.sd_model_checkpint == checkpoint_info.filename:
return
if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
lowvram.send_everything_to_cpu()
else:
sd_model.to(devices.cpu)
load_model_weights(sd_model, checkpoint_info.filename, checkpoint_info.hash)
if not shared.cmd_opts.lowvram and not shared.cmd_opts.medvram:
sd_model.to(devices.device)
print(f"Weights loaded.")
return sd_model
<|code_end|>
scripts/xy_grid.py
<|code_start|>from collections import namedtuple
from copy import copy
import random
from PIL import Image
import numpy as np
import modules.scripts as scripts
import gradio as gr
from modules import images
from modules.processing import process_images, Processed
from modules.shared import opts, cmd_opts, state
import modules.shared as shared
import modules.sd_samplers
import modules.sd_models
import re
def apply_field(field):
def fun(p, x, xs):
setattr(p, field, x)
return fun
def apply_prompt(p, x, xs):
p.prompt = p.prompt.replace(xs[0], x)
p.negative_prompt = p.negative_prompt.replace(xs[0], x)
samplers_dict = {}
for i, sampler in enumerate(modules.sd_samplers.samplers):
samplers_dict[sampler.name.lower()] = i
for alias in sampler.aliases:
samplers_dict[alias.lower()] = i
def apply_sampler(p, x, xs):
sampler_index = samplers_dict.get(x.lower(), None)
if sampler_index is None:
raise RuntimeError(f"Unknown sampler: {x}")
p.sampler_index = sampler_index
def apply_checkpoint(p, x, xs):
applicable = [info for info in modules.sd_models.checkpoints_list.values() if x in info.title]
assert len(applicable) > 0, f'Checkpoint {x} for found'
info = applicable[0]
modules.sd_models.reload_model_weights(shared.sd_model, info)
def format_value_add_label(p, opt, x):
if type(x) == float:
x = round(x, 8)
return f"{opt.label}: {x}"
def format_value(p, opt, x):
if type(x) == float:
x = round(x, 8)
return x
def do_nothing(p, x, xs):
pass
def format_nothing(p, opt, x):
return ""
AxisOption = namedtuple("AxisOption", ["label", "type", "apply", "format_value"])
AxisOptionImg2Img = namedtuple("AxisOptionImg2Img", ["label", "type", "apply", "format_value"])
axis_options = [
AxisOption("Nothing", str, do_nothing, format_nothing),
AxisOption("Seed", int, apply_field("seed"), format_value_add_label),
AxisOption("Var. seed", int, apply_field("subseed"), format_value_add_label),
AxisOption("Var. strength", float, apply_field("subseed_strength"), format_value_add_label),
AxisOption("Steps", int, apply_field("steps"), format_value_add_label),
AxisOption("CFG Scale", float, apply_field("cfg_scale"), format_value_add_label),
AxisOption("Prompt S/R", str, apply_prompt, format_value),
AxisOption("Sampler", str, apply_sampler, format_value),
AxisOption("Checkpoint name", str, apply_checkpoint, format_value),
AxisOption("Sigma Churn", float, apply_field("s_churn"), format_value_add_label),
AxisOption("Sigma min", float, apply_field("s_tmin"), format_value_add_label),
AxisOption("Sigma max", float, apply_field("s_tmax"), format_value_add_label),
AxisOption("Sigma noise", float, apply_field("s_noise"), format_value_add_label),
AxisOption("Eta", float, apply_field("eta"), format_value_add_label),
AxisOptionImg2Img("Denoising", float, apply_field("denoising_strength"), format_value_add_label), # as it is now all AxisOptionImg2Img items must go after AxisOption ones
]
def draw_xy_grid(p, xs, ys, x_labels, y_labels, cell, draw_legend):
res = []
ver_texts = [[images.GridAnnotation(y)] for y in y_labels]
hor_texts = [[images.GridAnnotation(x)] for x in x_labels]
first_pocessed = None
state.job_count = len(xs) * len(ys) * p.n_iter
for iy, y in enumerate(ys):
for ix, x in enumerate(xs):
state.job = f"{ix + iy * len(xs) + 1} out of {len(xs) * len(ys)}"
processed = cell(x, y)
if first_pocessed is None:
first_pocessed = processed
try:
res.append(processed.images[0])
except:
res.append(Image.new(res[0].mode, res[0].size))
grid = images.image_grid(res, rows=len(ys))
if draw_legend:
grid = images.draw_grid_annotations(grid, res[0].width, res[0].height, hor_texts, ver_texts)
first_pocessed.images = [grid]
return first_pocessed
re_range = re.compile(r"\s*([+-]?\s*\d+)\s*-\s*([+-]?\s*\d+)(?:\s*\(([+-]\d+)\s*\))?\s*")
re_range_float = re.compile(r"\s*([+-]?\s*\d+(?:.\d*)?)\s*-\s*([+-]?\s*\d+(?:.\d*)?)(?:\s*\(([+-]\d+(?:.\d*)?)\s*\))?\s*")
re_range_count = re.compile(r"\s*([+-]?\s*\d+)\s*-\s*([+-]?\s*\d+)(?:\s*\[(\d+)\s*\])?\s*")
re_range_count_float = re.compile(r"\s*([+-]?\s*\d+(?:.\d*)?)\s*-\s*([+-]?\s*\d+(?:.\d*)?)(?:\s*\[(\d+(?:.\d*)?)\s*\])?\s*")
class Script(scripts.Script):
def title(self):
return "X/Y plot"
def ui(self, is_img2img):
current_axis_options = [x for x in axis_options if type(x) == AxisOption or type(x) == AxisOptionImg2Img and is_img2img]
with gr.Row():
x_type = gr.Dropdown(label="X type", choices=[x.label for x in current_axis_options], value=current_axis_options[1].label, visible=False, type="index", elem_id="x_type")
x_values = gr.Textbox(label="X values", visible=False, lines=1)
with gr.Row():
y_type = gr.Dropdown(label="Y type", choices=[x.label for x in current_axis_options], value=current_axis_options[4].label, visible=False, type="index", elem_id="y_type")
y_values = gr.Textbox(label="Y values", visible=False, lines=1)
draw_legend = gr.Checkbox(label='Draw legend', value=True)
no_fixed_seeds = gr.Checkbox(label='Keep -1 for seeds', value=False)
return [x_type, x_values, y_type, y_values, draw_legend, no_fixed_seeds]
def run(self, p, x_type, x_values, y_type, y_values, draw_legend, no_fixed_seeds):
modules.processing.fix_seed(p)
p.batch_size = 1
def process_axis(opt, vals):
valslist = [x.strip() for x in vals.split(",")]
if opt.type == int:
valslist_ext = []
for val in valslist:
m = re_range.fullmatch(val)
mc = re_range_count.fullmatch(val)
if m is not None:
start = int(m.group(1))
end = int(m.group(2))+1
step = int(m.group(3)) if m.group(3) is not None else 1
valslist_ext += list(range(start, end, step))
elif mc is not None:
start = int(mc.group(1))
end = int(mc.group(2))
num = int(mc.group(3)) if mc.group(3) is not None else 1
valslist_ext += [int(x) for x in np.linspace(start=start, stop=end, num=num).tolist()]
else:
valslist_ext.append(val)
valslist = valslist_ext
elif opt.type == float:
valslist_ext = []
for val in valslist:
m = re_range_float.fullmatch(val)
mc = re_range_count_float.fullmatch(val)
if m is not None:
start = float(m.group(1))
end = float(m.group(2))
step = float(m.group(3)) if m.group(3) is not None else 1
valslist_ext += np.arange(start, end + step, step).tolist()
elif mc is not None:
start = float(mc.group(1))
end = float(mc.group(2))
num = int(mc.group(3)) if mc.group(3) is not None else 1
valslist_ext += np.linspace(start=start, stop=end, num=num).tolist()
else:
valslist_ext.append(val)
valslist = valslist_ext
valslist = [opt.type(x) for x in valslist]
return valslist
x_opt = axis_options[x_type]
xs = process_axis(x_opt, x_values)
y_opt = axis_options[y_type]
ys = process_axis(y_opt, y_values)
def fix_axis_seeds(axis_opt, axis_list):
if axis_opt.label == 'Seed':
return [int(random.randrange(4294967294)) if val is None or val == '' or val == -1 else val for val in axis_list]
else:
return axis_list
if not no_fixed_seeds:
xs = fix_axis_seeds(x_opt, xs)
ys = fix_axis_seeds(y_opt, ys)
if x_opt.label == 'Steps':
total_steps = sum(xs) * len(ys)
elif y_opt.label == 'Steps':
total_steps = sum(ys) * len(xs)
else:
total_steps = p.steps * len(xs) * len(ys)
print(f"X/Y plot will create {len(xs) * len(ys) * p.n_iter} images on a {len(xs)}x{len(ys)} grid. (Total steps to process: {total_steps * p.n_iter})")
shared.total_tqdm.updateTotal(total_steps * p.n_iter)
def cell(x, y):
pc = copy(p)
x_opt.apply(pc, x, xs)
y_opt.apply(pc, y, ys)
return process_images(pc)
processed = draw_xy_grid(
p,
xs=xs,
ys=ys,
x_labels=[x_opt.format_value(p, x_opt, x) for x in xs],
y_labels=[y_opt.format_value(p, y_opt, y) for y in ys],
cell=cell,
draw_legend=draw_legend
)
if opts.grid_save:
images.save_image(processed.images[0], p.outpath_grids, "xy_grid", prompt=p.prompt, seed=processed.seed, grid=True, p=p)
# restore checkpoint in case it was changed by axes
modules.sd_models.reload_model_weights(shared.sd_model)
return processed
<|code_end|>
|
modules/sd_models.py
<|code_start|>import glob
import os.path
import sys
from collections import namedtuple
import torch
from omegaconf import OmegaConf
from ldm.util import instantiate_from_config
from modules import shared
CheckpointInfo = namedtuple("CheckpointInfo", ['filename', 'title', 'hash', 'model_name'])
checkpoints_list = {}
try:
# this silences the annoying "Some weights of the model checkpoint were not used when initializing..." message at start.
from transformers import logging
logging.set_verbosity_error()
except Exception:
pass
def list_models():
checkpoints_list.clear()
model_dir = os.path.abspath(shared.cmd_opts.ckpt_dir)
def modeltitle(path, h):
abspath = os.path.abspath(path)
if abspath.startswith(model_dir):
name = abspath.replace(model_dir, '')
else:
name = os.path.basename(path)
if name.startswith("\\") or name.startswith("/"):
name = name[1:]
return f'{name} [{h}]'
cmd_ckpt = shared.cmd_opts.ckpt
if os.path.exists(cmd_ckpt):
h = model_hash(cmd_ckpt)
title = modeltitle(cmd_ckpt, h)
model_name = title.rsplit(".",1)[0] # remove extension if present
checkpoints_list[title] = CheckpointInfo(cmd_ckpt, title, h, model_name)
elif cmd_ckpt is not None and cmd_ckpt != shared.default_sd_model_file:
print(f"Checkpoint in --ckpt argument not found: {cmd_ckpt}", file=sys.stderr)
if os.path.exists(model_dir):
for filename in glob.glob(model_dir + '/**/*.ckpt', recursive=True):
h = model_hash(filename)
title = modeltitle(filename, h)
model_name = title.rsplit(".",1)[0] # remove extension if present
checkpoints_list[title] = CheckpointInfo(filename, title, h, model_name)
def get_closet_checkpoint_match(searchString):
applicable = sorted([info for info in checkpoints_list.values() if searchString in info.title], key = lambda x:len(x.title))
if len(applicable)>0:
return applicable[0]
return None
def model_hash(filename):
try:
with open(filename, "rb") as file:
import hashlib
m = hashlib.sha256()
file.seek(0x100000)
m.update(file.read(0x10000))
return m.hexdigest()[0:8]
except FileNotFoundError:
return 'NOFILE'
def select_checkpoint():
model_checkpoint = shared.opts.sd_model_checkpoint
checkpoint_info = checkpoints_list.get(model_checkpoint, None)
if checkpoint_info is not None:
return checkpoint_info
if len(checkpoints_list) == 0:
print(f"No checkpoints found. When searching for checkpoints, looked at:", file=sys.stderr)
print(f" - file {os.path.abspath(shared.cmd_opts.ckpt)}", file=sys.stderr)
print(f" - directory {os.path.abspath(shared.cmd_opts.ckpt_dir)}", file=sys.stderr)
print(f"Can't run without a checkpoint. Find and place a .ckpt file into any of those locations. The program will exit.", file=sys.stderr)
exit(1)
checkpoint_info = next(iter(checkpoints_list.values()))
if model_checkpoint is not None:
print(f"Checkpoint {model_checkpoint} not found; loading fallback {checkpoint_info.title}", file=sys.stderr)
return checkpoint_info
def load_model_weights(model, checkpoint_file, sd_model_hash):
print(f"Loading weights [{sd_model_hash}] from {checkpoint_file}")
pl_sd = torch.load(checkpoint_file, map_location="cpu")
if "global_step" in pl_sd:
print(f"Global Step: {pl_sd['global_step']}")
sd = pl_sd["state_dict"]
model.load_state_dict(sd, strict=False)
if shared.cmd_opts.opt_channelslast:
model.to(memory_format=torch.channels_last)
if not shared.cmd_opts.no_half:
model.half()
model.sd_model_hash = sd_model_hash
model.sd_model_checkpint = checkpoint_file
def load_model():
from modules import lowvram, sd_hijack
checkpoint_info = select_checkpoint()
sd_config = OmegaConf.load(shared.cmd_opts.config)
sd_model = instantiate_from_config(sd_config.model)
load_model_weights(sd_model, checkpoint_info.filename, checkpoint_info.hash)
if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
lowvram.setup_for_low_vram(sd_model, shared.cmd_opts.medvram)
else:
sd_model.to(shared.device)
sd_hijack.model_hijack.hijack(sd_model)
sd_model.eval()
print(f"Model loaded.")
return sd_model
def reload_model_weights(sd_model, info=None):
from modules import lowvram, devices
checkpoint_info = info or select_checkpoint()
if sd_model.sd_model_checkpint == checkpoint_info.filename:
return
if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
lowvram.send_everything_to_cpu()
else:
sd_model.to(devices.cpu)
load_model_weights(sd_model, checkpoint_info.filename, checkpoint_info.hash)
if not shared.cmd_opts.lowvram and not shared.cmd_opts.medvram:
sd_model.to(devices.device)
print(f"Weights loaded.")
return sd_model
<|code_end|>
scripts/xy_grid.py
<|code_start|>from collections import namedtuple
from copy import copy
import random
from PIL import Image
import numpy as np
import modules.scripts as scripts
import gradio as gr
from modules import images
from modules.processing import process_images, Processed
from modules.shared import opts, cmd_opts, state
import modules.shared as shared
import modules.sd_samplers
import modules.sd_models
import re
def apply_field(field):
def fun(p, x, xs):
setattr(p, field, x)
return fun
def apply_prompt(p, x, xs):
p.prompt = p.prompt.replace(xs[0], x)
p.negative_prompt = p.negative_prompt.replace(xs[0], x)
samplers_dict = {}
for i, sampler in enumerate(modules.sd_samplers.samplers):
samplers_dict[sampler.name.lower()] = i
for alias in sampler.aliases:
samplers_dict[alias.lower()] = i
def apply_sampler(p, x, xs):
sampler_index = samplers_dict.get(x.lower(), None)
if sampler_index is None:
raise RuntimeError(f"Unknown sampler: {x}")
p.sampler_index = sampler_index
def apply_checkpoint(p, x, xs):
info = modules.sd_models.get_closet_checkpoint_match(x)
assert info is not None, f'Checkpoint for {x} not found'
modules.sd_models.reload_model_weights(shared.sd_model, info)
def format_value_add_label(p, opt, x):
if type(x) == float:
x = round(x, 8)
return f"{opt.label}: {x}"
def format_value(p, opt, x):
if type(x) == float:
x = round(x, 8)
return x
def do_nothing(p, x, xs):
pass
def format_nothing(p, opt, x):
return ""
AxisOption = namedtuple("AxisOption", ["label", "type", "apply", "format_value"])
AxisOptionImg2Img = namedtuple("AxisOptionImg2Img", ["label", "type", "apply", "format_value"])
axis_options = [
AxisOption("Nothing", str, do_nothing, format_nothing),
AxisOption("Seed", int, apply_field("seed"), format_value_add_label),
AxisOption("Var. seed", int, apply_field("subseed"), format_value_add_label),
AxisOption("Var. strength", float, apply_field("subseed_strength"), format_value_add_label),
AxisOption("Steps", int, apply_field("steps"), format_value_add_label),
AxisOption("CFG Scale", float, apply_field("cfg_scale"), format_value_add_label),
AxisOption("Prompt S/R", str, apply_prompt, format_value),
AxisOption("Sampler", str, apply_sampler, format_value),
AxisOption("Checkpoint name", str, apply_checkpoint, format_value),
AxisOption("Sigma Churn", float, apply_field("s_churn"), format_value_add_label),
AxisOption("Sigma min", float, apply_field("s_tmin"), format_value_add_label),
AxisOption("Sigma max", float, apply_field("s_tmax"), format_value_add_label),
AxisOption("Sigma noise", float, apply_field("s_noise"), format_value_add_label),
AxisOption("Eta", float, apply_field("eta"), format_value_add_label),
AxisOptionImg2Img("Denoising", float, apply_field("denoising_strength"), format_value_add_label), # as it is now all AxisOptionImg2Img items must go after AxisOption ones
]
def draw_xy_grid(p, xs, ys, x_labels, y_labels, cell, draw_legend):
res = []
ver_texts = [[images.GridAnnotation(y)] for y in y_labels]
hor_texts = [[images.GridAnnotation(x)] for x in x_labels]
first_pocessed = None
state.job_count = len(xs) * len(ys) * p.n_iter
for iy, y in enumerate(ys):
for ix, x in enumerate(xs):
state.job = f"{ix + iy * len(xs) + 1} out of {len(xs) * len(ys)}"
processed = cell(x, y)
if first_pocessed is None:
first_pocessed = processed
try:
res.append(processed.images[0])
except:
res.append(Image.new(res[0].mode, res[0].size))
grid = images.image_grid(res, rows=len(ys))
if draw_legend:
grid = images.draw_grid_annotations(grid, res[0].width, res[0].height, hor_texts, ver_texts)
first_pocessed.images = [grid]
return first_pocessed
re_range = re.compile(r"\s*([+-]?\s*\d+)\s*-\s*([+-]?\s*\d+)(?:\s*\(([+-]\d+)\s*\))?\s*")
re_range_float = re.compile(r"\s*([+-]?\s*\d+(?:.\d*)?)\s*-\s*([+-]?\s*\d+(?:.\d*)?)(?:\s*\(([+-]\d+(?:.\d*)?)\s*\))?\s*")
re_range_count = re.compile(r"\s*([+-]?\s*\d+)\s*-\s*([+-]?\s*\d+)(?:\s*\[(\d+)\s*\])?\s*")
re_range_count_float = re.compile(r"\s*([+-]?\s*\d+(?:.\d*)?)\s*-\s*([+-]?\s*\d+(?:.\d*)?)(?:\s*\[(\d+(?:.\d*)?)\s*\])?\s*")
class Script(scripts.Script):
def title(self):
return "X/Y plot"
def ui(self, is_img2img):
current_axis_options = [x for x in axis_options if type(x) == AxisOption or type(x) == AxisOptionImg2Img and is_img2img]
with gr.Row():
x_type = gr.Dropdown(label="X type", choices=[x.label for x in current_axis_options], value=current_axis_options[1].label, visible=False, type="index", elem_id="x_type")
x_values = gr.Textbox(label="X values", visible=False, lines=1)
with gr.Row():
y_type = gr.Dropdown(label="Y type", choices=[x.label for x in current_axis_options], value=current_axis_options[4].label, visible=False, type="index", elem_id="y_type")
y_values = gr.Textbox(label="Y values", visible=False, lines=1)
draw_legend = gr.Checkbox(label='Draw legend', value=True)
no_fixed_seeds = gr.Checkbox(label='Keep -1 for seeds', value=False)
return [x_type, x_values, y_type, y_values, draw_legend, no_fixed_seeds]
def run(self, p, x_type, x_values, y_type, y_values, draw_legend, no_fixed_seeds):
modules.processing.fix_seed(p)
p.batch_size = 1
def process_axis(opt, vals):
valslist = [x.strip() for x in vals.split(",")]
if opt.type == int:
valslist_ext = []
for val in valslist:
m = re_range.fullmatch(val)
mc = re_range_count.fullmatch(val)
if m is not None:
start = int(m.group(1))
end = int(m.group(2))+1
step = int(m.group(3)) if m.group(3) is not None else 1
valslist_ext += list(range(start, end, step))
elif mc is not None:
start = int(mc.group(1))
end = int(mc.group(2))
num = int(mc.group(3)) if mc.group(3) is not None else 1
valslist_ext += [int(x) for x in np.linspace(start=start, stop=end, num=num).tolist()]
else:
valslist_ext.append(val)
valslist = valslist_ext
elif opt.type == float:
valslist_ext = []
for val in valslist:
m = re_range_float.fullmatch(val)
mc = re_range_count_float.fullmatch(val)
if m is not None:
start = float(m.group(1))
end = float(m.group(2))
step = float(m.group(3)) if m.group(3) is not None else 1
valslist_ext += np.arange(start, end + step, step).tolist()
elif mc is not None:
start = float(mc.group(1))
end = float(mc.group(2))
num = int(mc.group(3)) if mc.group(3) is not None else 1
valslist_ext += np.linspace(start=start, stop=end, num=num).tolist()
else:
valslist_ext.append(val)
valslist = valslist_ext
valslist = [opt.type(x) for x in valslist]
return valslist
x_opt = axis_options[x_type]
xs = process_axis(x_opt, x_values)
y_opt = axis_options[y_type]
ys = process_axis(y_opt, y_values)
def fix_axis_seeds(axis_opt, axis_list):
if axis_opt.label == 'Seed':
return [int(random.randrange(4294967294)) if val is None or val == '' or val == -1 else val for val in axis_list]
else:
return axis_list
if not no_fixed_seeds:
xs = fix_axis_seeds(x_opt, xs)
ys = fix_axis_seeds(y_opt, ys)
if x_opt.label == 'Steps':
total_steps = sum(xs) * len(ys)
elif y_opt.label == 'Steps':
total_steps = sum(ys) * len(xs)
else:
total_steps = p.steps * len(xs) * len(ys)
print(f"X/Y plot will create {len(xs) * len(ys) * p.n_iter} images on a {len(xs)}x{len(ys)} grid. (Total steps to process: {total_steps * p.n_iter})")
shared.total_tqdm.updateTotal(total_steps * p.n_iter)
def cell(x, y):
pc = copy(p)
x_opt.apply(pc, x, xs)
y_opt.apply(pc, y, ys)
return process_images(pc)
processed = draw_xy_grid(
p,
xs=xs,
ys=ys,
x_labels=[x_opt.format_value(p, x_opt, x) for x in xs],
y_labels=[y_opt.format_value(p, y_opt, y) for y in ys],
cell=cell,
draw_legend=draw_legend
)
if opts.grid_save:
images.save_image(processed.images[0], p.outpath_grids, "xy_grid", prompt=p.prompt, seed=processed.seed, grid=True, p=p)
# restore checkpoint in case it was changed by axes
modules.sd_models.reload_model_weights(shared.sd_model)
return processed
<|code_end|>
|
"Save" button always saves to .png, ignoring settings
**Describe the bug**
When you press "Save", the image written to /log/ is always in PNG format, no matter what output format is selected in the settings. The regular output images are properly saved as JPG, though. For the "Save" button to be useful, it should use the same format.
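A minimal sketch of the kind of change the report asks for: derive the saved file's extension from the configured sample format instead of hard-coding `.png` when the Save button writes to the log directory. The helper name and the `samples_format` parameter below are illustrative assumptions, not the project's actual fix.

```python
# Sketch only: pick the saved file's extension from a configured format
# instead of always writing PNG. `samples_format` is an assumed option name.
import os
from PIL import Image, PngImagePlugin

def save_with_configured_format(image: Image.Image, out_dir: str, basename: str,
                                samples_format: str = "png", infotext: str = "",
                                jpeg_quality: int = 80) -> str:
    ext = samples_format.lower().lstrip(".")
    path = os.path.join(out_dir, f"{basename}.{ext}")
    if ext == "png":
        # PNG can carry the generation parameters as a text chunk.
        pnginfo = PngImagePlugin.PngInfo()
        pnginfo.add_text("parameters", infotext)
        image.save(path, pnginfo=pnginfo)
    else:
        # JPEG has no alpha channel and no PNG text chunks.
        image.convert("RGB").save(path, quality=jpeg_quality)
    return path
```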
|
modules/ui.py
<|code_start|>import base64
import html
import io
import json
import math
import mimetypes
import os
import random
import sys
import time
import traceback
import numpy as np
import torch
from PIL import Image, PngImagePlugin
import gradio as gr
import gradio.utils
import gradio.routes
from modules.paths import script_path
from modules.shared import opts, cmd_opts
import modules.shared as shared
from modules.sd_samplers import samplers, samplers_for_img2img
from modules.sd_hijack import model_hijack
import modules.ldsr_model
import modules.scripts
import modules.gfpgan_model
import modules.codeformer_model
import modules.styles
import modules.generation_parameters_copypaste
# this is a fix for Windows users. Without it, javascript files will be served with text/html content-type and the bowser will not show any UI
mimetypes.init()
mimetypes.add_type('application/javascript', '.js')
if not cmd_opts.share and not cmd_opts.listen:
# fix gradio phoning home
gradio.utils.version_check = lambda: None
gradio.utils.get_local_ip_address = lambda: '127.0.0.1'
def gr_show(visible=True):
return {"visible": visible, "__type__": "update"}
sample_img2img = "assets/stable-samples/img2img/sketch-mountains-input.jpg"
sample_img2img = sample_img2img if os.path.exists(sample_img2img) else None
css_hide_progressbar = """
.wrap .m-12 svg { display:none!important; }
.wrap .m-12::before { content:"Loading..." }
.progress-bar { display:none!important; }
.meta-text { display:none!important; }
"""
# Using constants for these since the variation selector isn't visible.
# Important that they exactly match script.js for tooltip to work.
random_symbol = '\U0001f3b2\ufe0f' # 🎲️
reuse_symbol = '\u267b\ufe0f' # ♻️
art_symbol = '\U0001f3a8' # 🎨
paste_symbol = '\u2199\ufe0f' # ↙
def plaintext_to_html(text):
text = "<p>" + "<br>\n".join([f"{html.escape(x)}" for x in text.split('\n')]) + "</p>"
return text
def image_from_url_text(filedata):
if type(filedata) == list:
if len(filedata) == 0:
return None
filedata = filedata[0]
if filedata.startswith("data:image/png;base64,"):
filedata = filedata[len("data:image/png;base64,"):]
filedata = base64.decodebytes(filedata.encode('utf-8'))
image = Image.open(io.BytesIO(filedata))
return image
def send_gradio_gallery_to_image(x):
if len(x) == 0:
return None
return image_from_url_text(x[0])
def save_files(js_data, images, index):
import csv
os.makedirs(opts.outdir_save, exist_ok=True)
filenames = []
data = json.loads(js_data)
if index > -1 and opts.save_selected_only and (index >= data["index_of_first_image"]): # ensures we are looking at a specific non-grid picture, and we have save_selected_only
images = [images[index]]
infotexts = [data["infotexts"][index]]
else:
infotexts = data["infotexts"]
with open(os.path.join(opts.outdir_save, "log.csv"), "a", encoding="utf8", newline='') as file:
at_start = file.tell() == 0
writer = csv.writer(file)
if at_start:
writer.writerow(["prompt", "seed", "width", "height", "sampler", "cfgs", "steps", "filename", "negative_prompt"])
filename_base = str(int(time.time() * 1000))
for i, filedata in enumerate(images):
filename = filename_base + ("" if len(images) == 1 else "-" + str(i + 1)) + ".png"
filepath = os.path.join(opts.outdir_save, filename)
if filedata.startswith("data:image/png;base64,"):
filedata = filedata[len("data:image/png;base64,"):]
pnginfo = PngImagePlugin.PngInfo()
pnginfo.add_text('parameters', infotexts[i])
image = Image.open(io.BytesIO(base64.decodebytes(filedata.encode('utf-8'))))
image.save(filepath, quality=opts.jpeg_quality, pnginfo=pnginfo)
filenames.append(filename)
writer.writerow([data["prompt"], data["seed"], data["width"], data["height"], data["sampler"], data["cfg_scale"], data["steps"], filenames[0], data["negative_prompt"]])
return '', '', plaintext_to_html(f"Saved: {filenames[0]}")
def wrap_gradio_call(func):
def f(*args, **kwargs):
run_memmon = opts.memmon_poll_rate > 0 and not shared.mem_mon.disabled
if run_memmon:
shared.mem_mon.monitor()
t = time.perf_counter()
try:
res = list(func(*args, **kwargs))
except Exception as e:
print("Error completing request", file=sys.stderr)
print("Arguments:", args, kwargs, file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
shared.state.job = ""
shared.state.job_count = 0
res = [None, '', f"<div class='error'>{plaintext_to_html(type(e).__name__+': '+str(e))}</div>"]
elapsed = time.perf_counter() - t
if run_memmon:
mem_stats = {k: -(v//-(1024*1024)) for k, v in shared.mem_mon.stop().items()}
active_peak = mem_stats['active_peak']
reserved_peak = mem_stats['reserved_peak']
sys_peak = mem_stats['system_peak']
sys_total = mem_stats['total']
sys_pct = round(sys_peak/max(sys_total, 1) * 100, 2)
vram_html = f"<p class='vram'>Torch active/reserved: {active_peak}/{reserved_peak} MiB, <wbr>Sys VRAM: {sys_peak}/{sys_total} MiB ({sys_pct}%)</p>"
else:
vram_html = ''
# last item is always HTML
res[-1] += f"<div class='performance'><p class='time'>Time taken: <wbr>{elapsed:.2f}s</p>{vram_html}</div>"
shared.state.interrupted = False
return tuple(res)
return f
def check_progress_call(id_part):
if shared.state.job_count == 0:
return "", gr_show(False), gr_show(False)
progress = 0
if shared.state.job_count > 0:
progress += shared.state.job_no / shared.state.job_count
if shared.state.sampling_steps > 0:
progress += 1 / shared.state.job_count * shared.state.sampling_step / shared.state.sampling_steps
progress = min(progress, 1)
progressbar = ""
if opts.show_progressbar:
progressbar = f"""<div class='progressDiv'><div class='progress' style="width:{progress * 100}%">{str(int(progress*100))+"%" if progress > 0.01 else ""}</div></div>"""
image = gr_show(False)
preview_visibility = gr_show(False)
if opts.show_progress_every_n_steps > 0:
if shared.parallel_processing_allowed:
if shared.state.sampling_step - shared.state.current_image_sampling_step >= opts.show_progress_every_n_steps and shared.state.current_latent is not None:
shared.state.current_image = modules.sd_samplers.sample_to_image(shared.state.current_latent)
shared.state.current_image_sampling_step = shared.state.sampling_step
image = shared.state.current_image
if image is None:
image = gr.update(value=None)
else:
preview_visibility = gr_show(True)
return f"<span id='{id_part}_progress_span' style='display: none'>{time.time()}</span><p>{progressbar}</p>", preview_visibility, image
def check_progress_call_initial(id_part):
shared.state.job_count = -1
shared.state.current_latent = None
shared.state.current_image = None
return check_progress_call(id_part)
def roll_artist(prompt):
allowed_cats = set([x for x in shared.artist_db.categories() if len(opts.random_artist_categories)==0 or x in opts.random_artist_categories])
artist = random.choice([x for x in shared.artist_db.artists if x.category in allowed_cats])
return prompt + ", " + artist.name if prompt != '' else artist.name
def visit(x, func, path=""):
if hasattr(x, 'children'):
for c in x.children:
visit(c, func, path)
elif x.label is not None:
func(path + "/" + str(x.label), x)
def add_style(name: str, prompt: str, negative_prompt: str):
if name is None:
return [gr_show(), gr_show()]
style = modules.styles.PromptStyle(name, prompt, negative_prompt)
shared.prompt_styles.styles[style.name] = style
# Save all loaded prompt styles: this allows us to update the storage format in the future more easily, because we
# reserialize all styles every time we save them
shared.prompt_styles.save_styles(shared.styles_filename)
return [gr.Dropdown.update(visible=True, choices=list(shared.prompt_styles.styles)) for _ in range(4)]
def apply_styles(prompt, prompt_neg, style1_name, style2_name):
prompt = shared.prompt_styles.apply_styles_to_prompt(prompt, [style1_name, style2_name])
prompt_neg = shared.prompt_styles.apply_negative_styles_to_prompt(prompt_neg, [style1_name, style2_name])
return [gr.Textbox.update(value=prompt), gr.Textbox.update(value=prompt_neg), gr.Dropdown.update(value="None"), gr.Dropdown.update(value="None")]
def interrogate(image):
prompt = shared.interrogator.interrogate(image)
return gr_show(True) if prompt is None else prompt
def create_seed_inputs():
with gr.Row():
with gr.Box():
with gr.Row(elem_id='seed_row'):
seed = (gr.Textbox if cmd_opts.use_textbox_seed else gr.Number)(label='Seed', value=-1)
seed.style(container=False)
random_seed = gr.Button(random_symbol, elem_id='random_seed')
reuse_seed = gr.Button(reuse_symbol, elem_id='reuse_seed')
with gr.Box(elem_id='subseed_show_box'):
seed_checkbox = gr.Checkbox(label='Extra', elem_id='subseed_show', value=False)
# Components to show/hide based on the 'Extra' checkbox
seed_extras = []
with gr.Row(visible=False) as seed_extra_row_1:
seed_extras.append(seed_extra_row_1)
with gr.Box():
with gr.Row(elem_id='subseed_row'):
subseed = gr.Number(label='Variation seed', value=-1)
subseed.style(container=False)
random_subseed = gr.Button(random_symbol, elem_id='random_subseed')
reuse_subseed = gr.Button(reuse_symbol, elem_id='reuse_subseed')
subseed_strength = gr.Slider(label='Variation strength', value=0.0, minimum=0, maximum=1, step=0.01)
with gr.Row(visible=False) as seed_extra_row_2:
seed_extras.append(seed_extra_row_2)
seed_resize_from_w = gr.Slider(minimum=0, maximum=2048, step=64, label="Resize seed from width", value=0)
seed_resize_from_h = gr.Slider(minimum=0, maximum=2048, step=64, label="Resize seed from height", value=0)
random_seed.click(fn=lambda: -1, show_progress=False, inputs=[], outputs=[seed])
random_subseed.click(fn=lambda: -1, show_progress=False, inputs=[], outputs=[subseed])
def change_visibility(show):
return {comp: gr_show(show) for comp in seed_extras}
seed_checkbox.change(change_visibility, show_progress=False, inputs=[seed_checkbox], outputs=seed_extras)
return seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox
def connect_reuse_seed(seed: gr.Number, reuse_seed: gr.Button, generation_info: gr.Textbox, dummy_component, is_subseed):
""" Connects a 'reuse (sub)seed' button's click event so that it copies last used
(sub)seed value from generation info the to the seed field. If copying subseed and subseed strength
was 0, i.e. no variation seed was used, it copies the normal seed value instead."""
def copy_seed(gen_info_string: str, index):
res = -1
try:
gen_info = json.loads(gen_info_string)
index -= gen_info.get('index_of_first_image', 0)
if is_subseed and gen_info.get('subseed_strength', 0) > 0:
all_subseeds = gen_info.get('all_subseeds', [-1])
res = all_subseeds[index if 0 <= index < len(all_subseeds) else 0]
else:
all_seeds = gen_info.get('all_seeds', [-1])
res = all_seeds[index if 0 <= index < len(all_seeds) else 0]
except json.decoder.JSONDecodeError as e:
if gen_info_string != '':
print("Error parsing JSON generation info:", file=sys.stderr)
print(gen_info_string, file=sys.stderr)
return [res, gr_show(False)]
reuse_seed.click(
fn=copy_seed,
_js="(x, y) => [x, selected_gallery_index()]",
show_progress=False,
inputs=[generation_info, dummy_component],
outputs=[seed, dummy_component]
)
def update_token_counter(text):
tokens, token_count, max_length = model_hijack.tokenize(text)
style_class = ' class="red"' if (token_count > max_length) else ""
return f"<span {style_class}>{token_count}/{max_length}</span>"
def create_toprow(is_img2img):
id_part = "img2img" if is_img2img else "txt2img"
with gr.Row(elem_id="toprow"):
with gr.Column(scale=4):
with gr.Row():
with gr.Column(scale=80):
with gr.Row():
prompt = gr.Textbox(label="Prompt", elem_id=f"{id_part}_prompt", show_label=False, placeholder="Prompt", lines=2)
with gr.Column(scale=1, elem_id="roll_col"):
roll = gr.Button(value=art_symbol, elem_id="roll", visible=len(shared.artist_db.artists) > 0)
paste = gr.Button(value=paste_symbol, elem_id="paste")
token_counter = gr.HTML(value="<span></span>", elem_id=f"{id_part}_token_counter")
hidden_button = gr.Button(visible=False, elem_id=f"{id_part}_token_button")
hidden_button.click(fn=update_token_counter, inputs=[prompt], outputs=[token_counter])
with gr.Column(scale=10, elem_id="style_pos_col"):
prompt_style = gr.Dropdown(label="Style 1", elem_id=f"{id_part}_style_index", choices=[k for k, v in shared.prompt_styles.styles.items()], value=next(iter(shared.prompt_styles.styles.keys())), visible=len(shared.prompt_styles.styles) > 1)
with gr.Row():
with gr.Column(scale=8):
negative_prompt = gr.Textbox(label="Negative prompt", elem_id="negative_prompt", show_label=False, placeholder="Negative prompt", lines=2)
with gr.Column(scale=1, elem_id="style_neg_col"):
prompt_style2 = gr.Dropdown(label="Style 2", elem_id=f"{id_part}_style2_index", choices=[k for k, v in shared.prompt_styles.styles.items()], value=next(iter(shared.prompt_styles.styles.keys())), visible=len(shared.prompt_styles.styles) > 1)
with gr.Column(scale=1):
with gr.Row():
interrupt = gr.Button('Interrupt', elem_id=f"{id_part}_interrupt")
submit = gr.Button('Generate', elem_id="generate", variant='primary')
interrupt.click(
fn=lambda: shared.state.interrupt(),
inputs=[],
outputs=[],
)
with gr.Row():
if is_img2img:
interrogate = gr.Button('Interrogate', elem_id="interrogate")
else:
interrogate = None
prompt_style_apply = gr.Button('Apply style', elem_id="style_apply")
save_style = gr.Button('Create style', elem_id="style_create")
return prompt, roll, prompt_style, negative_prompt, prompt_style2, submit, interrogate, prompt_style_apply, save_style, paste
def setup_progressbar(progressbar, preview, id_part):
check_progress = gr.Button('Check progress', elem_id=f"{id_part}_check_progress", visible=False)
check_progress.click(
fn=lambda: check_progress_call(id_part),
show_progress=False,
inputs=[],
outputs=[progressbar, preview, preview],
)
check_progress_initial = gr.Button('Check progress (first)', elem_id=f"{id_part}_check_progress_initial", visible=False)
check_progress_initial.click(
fn=lambda: check_progress_call_initial(id_part),
show_progress=False,
inputs=[],
outputs=[progressbar, preview, preview],
)
def create_ui(txt2img, img2img, run_extras, run_pnginfo, run_modelmerger):
with gr.Blocks(analytics_enabled=False) as txt2img_interface:
txt2img_prompt, roll, txt2img_prompt_style, txt2img_negative_prompt, txt2img_prompt_style2, submit, _, txt2img_prompt_style_apply, txt2img_save_style, paste = create_toprow(is_img2img=False)
dummy_component = gr.Label(visible=False)
with gr.Row(elem_id='txt2img_progress_row'):
with gr.Column(scale=1):
pass
with gr.Column(scale=1):
progressbar = gr.HTML(elem_id="txt2img_progressbar")
txt2img_preview = gr.Image(elem_id='txt2img_preview', visible=False)
setup_progressbar(progressbar, txt2img_preview, 'txt2img')
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
steps = gr.Slider(minimum=1, maximum=150, step=1, label="Sampling Steps", value=20)
sampler_index = gr.Radio(label='Sampling method', elem_id="txt2img_sampling", choices=[x.name for x in samplers], value=samplers[0].name, type="index")
with gr.Group():
width = gr.Slider(minimum=64, maximum=2048, step=64, label="Width", value=512)
height = gr.Slider(minimum=64, maximum=2048, step=64, label="Height", value=512)
with gr.Row():
restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1)
tiling = gr.Checkbox(label='Tiling', value=False)
enable_hr = gr.Checkbox(label='Highres. fix', value=False)
with gr.Row(visible=False) as hr_options:
scale_latent = gr.Checkbox(label='Scale latent', value=False)
denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7)
with gr.Row():
batch_count = gr.Slider(minimum=1, maximum=cmd_opts.max_batch_count, step=1, label='Batch count', value=1)
batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1)
cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0)
seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs()
with gr.Group():
custom_inputs = modules.scripts.scripts_txt2img.setup_ui(is_img2img=False)
with gr.Column(variant='panel'):
with gr.Group():
txt2img_preview = gr.Image(elem_id='txt2img_preview', visible=False)
txt2img_gallery = gr.Gallery(label='Output', show_label=False, elem_id='txt2img_gallery').style(grid=4)
with gr.Group():
with gr.Row():
save = gr.Button('Save')
send_to_img2img = gr.Button('Send to img2img')
send_to_inpaint = gr.Button('Send to inpaint')
send_to_extras = gr.Button('Send to extras')
with gr.Group():
html_info = gr.HTML()
generation_info = gr.Textbox(visible=False)
connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False)
connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True)
txt2img_args = dict(
fn=txt2img,
_js="submit",
inputs=[
txt2img_prompt,
txt2img_negative_prompt,
txt2img_prompt_style,
txt2img_prompt_style2,
steps,
sampler_index,
restore_faces,
tiling,
batch_count,
batch_size,
cfg_scale,
seed,
subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox,
height,
width,
enable_hr,
scale_latent,
denoising_strength,
] + custom_inputs,
outputs=[
txt2img_gallery,
generation_info,
html_info
],
show_progress=False,
)
txt2img_prompt.submit(**txt2img_args)
submit.click(**txt2img_args)
enable_hr.change(
fn=lambda x: gr_show(x),
inputs=[enable_hr],
outputs=[hr_options],
)
save.click(
fn=wrap_gradio_call(save_files),
_js="(x, y, z) => [x, y, selected_gallery_index()]",
inputs=[
generation_info,
txt2img_gallery,
html_info,
],
outputs=[
html_info,
html_info,
html_info,
]
)
roll.click(
fn=roll_artist,
inputs=[
txt2img_prompt,
],
outputs=[
txt2img_prompt,
]
)
txt2img_paste_fields = [
(txt2img_prompt, "Prompt"),
(txt2img_negative_prompt, "Negative prompt"),
(steps, "Steps"),
(sampler_index, "Sampler"),
(restore_faces, "Face restoration"),
(cfg_scale, "CFG scale"),
(seed, "Seed"),
(width, "Size-1"),
(height, "Size-2"),
(batch_size, "Batch size"),
(subseed, "Variation seed"),
(subseed_strength, "Variation seed strength"),
(seed_resize_from_w, "Seed resize from-1"),
(seed_resize_from_h, "Seed resize from-2"),
(denoising_strength, "Denoising strength"),
(enable_hr, lambda d: "Denoising strength" in d),
(hr_options, lambda d: gr.Row.update(visible="Denoising strength" in d)),
]
modules.generation_parameters_copypaste.connect_paste(paste, txt2img_paste_fields, txt2img_prompt)
with gr.Blocks(analytics_enabled=False) as img2img_interface:
img2img_prompt, roll, img2img_prompt_style, img2img_negative_prompt, img2img_prompt_style2, submit, img2img_interrogate, img2img_prompt_style_apply, img2img_save_style, paste = create_toprow(is_img2img=True)
with gr.Row(elem_id='img2img_progress_row'):
with gr.Column(scale=1):
pass
with gr.Column(scale=1):
progressbar = gr.HTML(elem_id="img2img_progressbar")
img2img_preview = gr.Image(elem_id='img2img_preview', visible=False)
setup_progressbar(progressbar, img2img_preview, 'img2img')
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
with gr.Tabs(elem_id="mode_img2img") as tabs_img2img_mode:
with gr.TabItem('img2img', id='img2img'):
init_img = gr.Image(label="Image for img2img", show_label=False, source="upload", interactive=True, type="pil")
with gr.TabItem('Inpaint', id='inpaint'):
init_img_with_mask = gr.Image(label="Image for inpainting with mask", show_label=False, elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool="sketch", image_mode="RGBA")
init_img_inpaint = gr.Image(label="Image for img2img", show_label=False, source="upload", interactive=True, type="pil", visible=False, elem_id="img_inpaint_base")
init_mask_inpaint = gr.Image(label="Mask", source="upload", interactive=True, type="pil", visible=False, elem_id="img_inpaint_mask")
mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4)
with gr.Row():
mask_mode = gr.Radio(label="Mask mode", show_label=False, choices=["Draw mask", "Upload mask"], type="index", value="Draw mask", elem_id="mask_mode")
inpainting_mask_invert = gr.Radio(label='Masking mode', show_label=False, choices=['Inpaint masked', 'Inpaint not masked'], value='Inpaint masked', type="index")
inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='fill', type="index")
with gr.Row():
inpaint_full_res = gr.Checkbox(label='Inpaint at full resolution', value=False)
inpaint_full_res_padding = gr.Slider(label='Inpaint at full resolution padding, pixels', minimum=0, maximum=256, step=4, value=32)
with gr.TabItem('Batch img2img', id='batch'):
hidden = '<br>Disabled when launched with --hide-ui-dir-config.' if shared.cmd_opts.hide_ui_dir_config else ''
gr.HTML(f"<p class=\"text-gray-500\">Process images in a directory on the same machine where the server is running.{hidden}</p>")
img2img_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs)
img2img_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs)
with gr.Row():
resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", show_label=False, choices=["Just resize", "Crop and resize", "Resize and fill"], type="index", value="Just resize")
steps = gr.Slider(minimum=1, maximum=150, step=1, label="Sampling Steps", value=20)
sampler_index = gr.Radio(label='Sampling method', choices=[x.name for x in samplers_for_img2img], value=samplers_for_img2img[0].name, type="index")
with gr.Group():
width = gr.Slider(minimum=64, maximum=2048, step=64, label="Width", value=512)
height = gr.Slider(minimum=64, maximum=2048, step=64, label="Height", value=512)
with gr.Row():
restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1)
tiling = gr.Checkbox(label='Tiling', value=False)
with gr.Row():
batch_count = gr.Slider(minimum=1, maximum=cmd_opts.max_batch_count, step=1, label='Batch count', value=1)
batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1)
with gr.Group():
cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0)
denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75)
seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs()
with gr.Group():
custom_inputs = modules.scripts.scripts_img2img.setup_ui(is_img2img=True)
with gr.Column(variant='panel'):
with gr.Group():
img2img_preview = gr.Image(elem_id='img2img_preview', visible=False)
img2img_gallery = gr.Gallery(label='Output', show_label=False, elem_id='img2img_gallery').style(grid=4)
with gr.Group():
with gr.Row():
save = gr.Button('Save')
img2img_send_to_img2img = gr.Button('Send to img2img')
img2img_send_to_inpaint = gr.Button('Send to inpaint')
img2img_send_to_extras = gr.Button('Send to extras')
with gr.Group():
html_info = gr.HTML()
generation_info = gr.Textbox(visible=False)
connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False)
connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True)
mask_mode.change(
lambda mode, img: {
init_img_with_mask: gr_show(mode == 0),
init_img_inpaint: gr_show(mode == 1),
init_mask_inpaint: gr_show(mode == 1),
},
inputs=[mask_mode, init_img_with_mask],
outputs=[
init_img_with_mask,
init_img_inpaint,
init_mask_inpaint,
],
)
img2img_args = dict(
fn=img2img,
_js="submit_img2img",
inputs=[
dummy_component,
img2img_prompt,
img2img_negative_prompt,
img2img_prompt_style,
img2img_prompt_style2,
init_img,
init_img_with_mask,
init_img_inpaint,
init_mask_inpaint,
mask_mode,
steps,
sampler_index,
mask_blur,
inpainting_fill,
restore_faces,
tiling,
batch_count,
batch_size,
cfg_scale,
denoising_strength,
seed,
subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox,
height,
width,
resize_mode,
inpaint_full_res,
inpaint_full_res_padding,
inpainting_mask_invert,
img2img_batch_input_dir,
img2img_batch_output_dir,
] + custom_inputs,
outputs=[
img2img_gallery,
generation_info,
html_info
],
show_progress=False,
)
img2img_prompt.submit(**img2img_args)
submit.click(**img2img_args)
img2img_interrogate.click(
fn=interrogate,
inputs=[init_img],
outputs=[img2img_prompt],
)
save.click(
fn=wrap_gradio_call(save_files),
_js="(x, y, z) => [x, y, selected_gallery_index()]",
inputs=[
generation_info,
img2img_gallery,
html_info
],
outputs=[
html_info,
html_info,
html_info,
]
)
roll.click(
fn=roll_artist,
inputs=[
img2img_prompt,
],
outputs=[
img2img_prompt,
]
)
prompts = [(txt2img_prompt, txt2img_negative_prompt), (img2img_prompt, img2img_negative_prompt)]
style_dropdowns = [(txt2img_prompt_style, txt2img_prompt_style2), (img2img_prompt_style, img2img_prompt_style2)]
for button, (prompt, negative_prompt) in zip([txt2img_save_style, img2img_save_style], prompts):
button.click(
fn=add_style,
_js="ask_for_style_name",
# Have to pass empty dummy component here, because the JavaScript and Python function have to accept
# the same number of parameters, but we only know the style-name after the JavaScript prompt
inputs=[dummy_component, prompt, negative_prompt],
outputs=[txt2img_prompt_style, img2img_prompt_style, txt2img_prompt_style2, img2img_prompt_style2],
)
for button, (prompt, negative_prompt), (style1, style2) in zip([txt2img_prompt_style_apply, img2img_prompt_style_apply], prompts, style_dropdowns):
button.click(
fn=apply_styles,
inputs=[prompt, negative_prompt, style1, style2],
outputs=[prompt, negative_prompt, style1, style2],
)
img2img_paste_fields = [
(img2img_prompt, "Prompt"),
(img2img_negative_prompt, "Negative prompt"),
(steps, "Steps"),
(sampler_index, "Sampler"),
(restore_faces, "Face restoration"),
(cfg_scale, "CFG scale"),
(seed, "Seed"),
(width, "Size-1"),
(height, "Size-2"),
(batch_size, "Batch size"),
(subseed, "Variation seed"),
(subseed_strength, "Variation seed strength"),
(seed_resize_from_w, "Seed resize from-1"),
(seed_resize_from_h, "Seed resize from-2"),
(denoising_strength, "Denoising strength"),
]
modules.generation_parameters_copypaste.connect_paste(paste, img2img_paste_fields, img2img_prompt)
with gr.Blocks(analytics_enabled=False) as extras_interface:
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
with gr.Tabs(elem_id="mode_extras"):
with gr.TabItem('Single Image'):
extras_image = gr.Image(label="Source", source="upload", interactive=True, type="pil")
with gr.TabItem('Batch Process'):
image_batch = gr.File(label="Batch Process", file_count="multiple", interactive=True, type="file")
upscaling_resize = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Resize", value=2)
with gr.Group():
extras_upscaler_1 = gr.Radio(label='Upscaler 1', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index")
with gr.Group():
extras_upscaler_2 = gr.Radio(label='Upscaler 2', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index")
extras_upscaler_2_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="Upscaler 2 visibility", value=1)
with gr.Group():
gfpgan_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="GFPGAN visibility", value=0, interactive=modules.gfpgan_model.have_gfpgan)
with gr.Group():
codeformer_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="CodeFormer visibility", value=0, interactive=modules.codeformer_model.have_codeformer)
codeformer_weight = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="CodeFormer weight (0 = maximum effect, 1 = minimum effect)", value=0, interactive=modules.codeformer_model.have_codeformer)
submit = gr.Button('Generate', elem_id="extras_generate", variant='primary')
with gr.Column(variant='panel'):
result_images = gr.Gallery(label="Result", show_label=False)
html_info_x = gr.HTML()
html_info = gr.HTML()
extras_send_to_img2img = gr.Button('Send to img2img')
extras_send_to_inpaint = gr.Button('Send to inpaint')
submit.click(
fn=run_extras,
_js="get_extras_tab_index",
inputs=[
dummy_component,
extras_image,
image_batch,
gfpgan_visibility,
codeformer_visibility,
codeformer_weight,
upscaling_resize,
extras_upscaler_1,
extras_upscaler_2,
extras_upscaler_2_visibility,
],
outputs=[
result_images,
html_info_x,
html_info,
]
)
extras_send_to_img2img.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_img2img",
inputs=[result_images],
outputs=[init_img],
)
extras_send_to_inpaint.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_img2img",
inputs=[result_images],
outputs=[init_img_with_mask],
)
with gr.Blocks(analytics_enabled=False) as pnginfo_interface:
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
image = gr.Image(elem_id="pnginfo_image", label="Source", source="upload", interactive=True, type="pil")
with gr.Column(variant='panel'):
html = gr.HTML()
generation_info = gr.Textbox(visible=False)
html2 = gr.HTML()
with gr.Row():
pnginfo_send_to_txt2img = gr.Button('Send to txt2img')
pnginfo_send_to_img2img = gr.Button('Send to img2img')
image.change(
fn=wrap_gradio_call(run_pnginfo),
inputs=[image],
outputs=[html, generation_info, html2],
)
with gr.Blocks() as modelmerger_interface:
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
gr.HTML(value="<p>A merger of the two checkpoints will be generated in your <b>checkpoint</b> directory.</p>")
with gr.Row():
primary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_primary_model_name", label="Primary Model Name")
secondary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_secondary_model_name", label="Secondary Model Name")
interp_amount = gr.Slider(minimum=0.0, maximum=1.0, step=0.05, label='Interpolation Amount', value=0.3)
interp_method = gr.Radio(choices=["Weighted Sum", "Sigmoid", "Inverse Sigmoid"], value="Weighted Sum", label="Interpolation Method")
save_as_half = gr.Checkbox(value=False, label="Safe as float16")
modelmerger_merge = gr.Button(elem_id="modelmerger_merge", label="Merge", variant='primary')
with gr.Column(variant='panel'):
submit_result = gr.Textbox(elem_id="modelmerger_result", show_label=False)
def create_setting_component(key):
def fun():
return opts.data[key] if key in opts.data else opts.data_labels[key].default
info = opts.data_labels[key]
t = type(info.default)
args = info.component_args() if callable(info.component_args) else info.component_args
if info.component is not None:
comp = info.component
elif t == str:
comp = gr.Textbox
elif t == int:
comp = gr.Number
elif t == bool:
comp = gr.Checkbox
else:
raise Exception(f'bad options item type: {str(t)} for key {key}')
return comp(label=info.label, value=fun, **(args or {}))
components = []
component_dict = {}
def run_settings(*args):
changed = 0
for key, value, comp in zip(opts.data_labels.keys(), args, components):
if not opts.same_type(value, opts.data_labels[key].default):
return f"Bad value for setting {key}: {value}; expecting {type(opts.data_labels[key].default).__name__}"
for key, value, comp in zip(opts.data_labels.keys(), args, components):
comp_args = opts.data_labels[key].component_args
if comp_args and isinstance(comp_args, dict) and comp_args.get('visible') is False:
continue
oldval = opts.data.get(key, None)
opts.data[key] = value
if oldval != value:
if opts.data_labels[key].onchange is not None:
opts.data_labels[key].onchange()
changed += 1
opts.save(shared.config_filename)
return f'{changed} settings changed.', opts.dumpjson()
with gr.Blocks(analytics_enabled=False) as settings_interface:
settings_submit = gr.Button(value="Apply settings", variant='primary')
result = gr.HTML()
settings_cols = 3
items_per_col = int(len(opts.data_labels) * 0.9 / settings_cols)
cols_displayed = 0
items_displayed = 0
previous_section = None
column = None
with gr.Row(elem_id="settings").style(equal_height=False):
for i, (k, item) in enumerate(opts.data_labels.items()):
if previous_section != item.section:
if cols_displayed < settings_cols and (items_displayed >= items_per_col or previous_section is None):
if column is not None:
column.__exit__()
column = gr.Column(variant='panel')
column.__enter__()
items_displayed = 0
cols_displayed += 1
previous_section = item.section
gr.HTML(elem_id="settings_header_text_{}".format(item.section[0]), value='<h1 class="gr-button-lg">{}</h1>'.format(item.section[1]))
component = create_setting_component(k)
component_dict[k] = component
components.append(component)
items_displayed += 1
request_notifications = gr.Button(value='Request browser notifications', elem_id="request_notifications")
request_notifications.click(
fn=lambda: None,
inputs=[],
outputs=[],
_js='function(){}'
)
if column is not None:
column.__exit__()
interfaces = [
(txt2img_interface, "txt2img", "txt2img"),
(img2img_interface, "img2img", "img2img"),
(extras_interface, "Extras", "extras"),
(pnginfo_interface, "PNG Info", "pnginfo"),
(modelmerger_interface, "Checkpoint Merger", "modelmerger"),
(settings_interface, "Settings", "settings"),
]
with open(os.path.join(script_path, "style.css"), "r", encoding="utf8") as file:
css = file.read()
if os.path.exists(os.path.join(script_path, "user.css")):
with open(os.path.join(script_path, "user.css"), "r", encoding="utf8") as file:
usercss = file.read()
css += usercss
if not cmd_opts.no_progressbar_hiding:
css += css_hide_progressbar
with gr.Blocks(css=css, analytics_enabled=False, title="Stable Diffusion") as demo:
with gr.Tabs() as tabs:
for interface, label, ifid in interfaces:
with gr.TabItem(label, id=ifid):
interface.render()
if os.path.exists(os.path.join(script_path, "notification.mp3")):
audio_notification = gr.Audio(interactive=False, value=os.path.join(script_path, "notification.mp3"), elem_id="audio_notification", visible=False)
text_settings = gr.Textbox(elem_id="settings_json", value=lambda: opts.dumpjson(), visible=False)
settings_submit.click(
fn=run_settings,
inputs=components,
outputs=[result, text_settings],
)
modelmerger_merge.click(
fn=run_modelmerger,
inputs=[
primary_model_name,
secondary_model_name,
interp_method,
interp_amount,
save_as_half,
],
outputs=[
submit_result,
primary_model_name,
secondary_model_name,
component_dict['sd_model_checkpoint'],
]
)
paste_field_names = ['Prompt', 'Negative prompt', 'Steps', 'Face restoration', 'Seed', 'Size-1', 'Size-2']
txt2img_fields = [field for field,name in txt2img_paste_fields if name in paste_field_names]
img2img_fields = [field for field,name in img2img_paste_fields if name in paste_field_names]
send_to_img2img.click(
fn=lambda img, *args: (image_from_url_text(img),*args),
_js="(gallery, ...args) => [extract_image_from_gallery_img2img(gallery), ...args]",
inputs=[txt2img_gallery] + txt2img_fields,
outputs=[init_img] + img2img_fields,
)
send_to_inpaint.click(
fn=lambda x, *args: (image_from_url_text(x), *args),
_js="(gallery, ...args) => [extract_image_from_gallery_inpaint(gallery), ...args]",
inputs=[txt2img_gallery] + txt2img_fields,
outputs=[init_img_with_mask] + img2img_fields,
)
img2img_send_to_img2img.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_img2img",
inputs=[img2img_gallery],
outputs=[init_img],
)
img2img_send_to_inpaint.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_inpaint",
inputs=[img2img_gallery],
outputs=[init_img_with_mask],
)
send_to_extras.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_extras",
inputs=[txt2img_gallery],
outputs=[extras_image],
)
img2img_send_to_extras.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_extras",
inputs=[img2img_gallery],
outputs=[extras_image],
)
modules.generation_parameters_copypaste.connect_paste(pnginfo_send_to_txt2img, txt2img_paste_fields, generation_info, 'switch_to_txt2img')
modules.generation_parameters_copypaste.connect_paste(pnginfo_send_to_img2img, img2img_paste_fields, generation_info, 'switch_to_img2img_img2img')
ui_config_file = cmd_opts.ui_config_file
ui_settings = {}
settings_count = len(ui_settings)
error_loading = False
try:
if os.path.exists(ui_config_file):
with open(ui_config_file, "r", encoding="utf8") as file:
ui_settings = json.load(file)
except Exception:
error_loading = True
print("Error loading settings:", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
def loadsave(path, x):
def apply_field(obj, field, condition=None):
key = path + "/" + field
if getattr(obj,'custom_script_source',None) is not None:
key = 'customscript/' + obj.custom_script_source + '/' + key
if getattr(obj, 'do_not_save_to_config', False):
return
saved_value = ui_settings.get(key, None)
if saved_value is None:
ui_settings[key] = getattr(obj, field)
elif condition is None or condition(saved_value):
setattr(obj, field, saved_value)
if type(x) in [gr.Slider, gr.Radio, gr.Checkbox, gr.Textbox, gr.Number] and x.visible:
apply_field(x, 'visible')
if type(x) == gr.Slider:
apply_field(x, 'value')
apply_field(x, 'minimum')
apply_field(x, 'maximum')
apply_field(x, 'step')
if type(x) == gr.Radio:
apply_field(x, 'value', lambda val: val in x.choices)
if type(x) == gr.Checkbox:
apply_field(x, 'value')
if type(x) == gr.Textbox:
apply_field(x, 'value')
if type(x) == gr.Number:
apply_field(x, 'value')
visit(txt2img_interface, loadsave, "txt2img")
visit(img2img_interface, loadsave, "img2img")
visit(extras_interface, loadsave, "extras")
if not error_loading and (not os.path.exists(ui_config_file) or settings_count != len(ui_settings)):
with open(ui_config_file, "w", encoding="utf8") as file:
json.dump(ui_settings, file, indent=4)
return demo
with open(os.path.join(script_path, "script.js"), "r", encoding="utf8") as jsfile:
javascript = f'<script>{jsfile.read()}</script>'
jsdir = os.path.join(script_path, "javascript")
for filename in sorted(os.listdir(jsdir)):
with open(os.path.join(jsdir, filename), "r", encoding="utf8") as jsfile:
javascript += f"\n<script>{jsfile.read()}</script>"
def template_response(*args, **kwargs):
res = gradio_routes_templates_response(*args, **kwargs)
res.body = res.body.replace(b'</head>', f'{javascript}</head>'.encode("utf8"))
res.init_headers()
return res
gradio_routes_templates_response = gradio.routes.templates.TemplateResponse
gradio.routes.templates.TemplateResponse = template_response
<|code_end|>
|
modules/ui.py
<|code_start|>import base64
import html
import io
import json
import math
import mimetypes
import os
import random
import sys
import time
import traceback
import numpy as np
import torch
from PIL import Image, PngImagePlugin
import piexif
import gradio as gr
import gradio.utils
import gradio.routes
from modules.paths import script_path
from modules.shared import opts, cmd_opts
import modules.shared as shared
from modules.sd_samplers import samplers, samplers_for_img2img
from modules.sd_hijack import model_hijack
import modules.ldsr_model
import modules.scripts
import modules.gfpgan_model
import modules.codeformer_model
import modules.styles
import modules.generation_parameters_copypaste
# this is a fix for Windows users. Without it, javascript files will be served with text/html content-type and the browser will not show any UI
mimetypes.init()
mimetypes.add_type('application/javascript', '.js')
if not cmd_opts.share and not cmd_opts.listen:
# fix gradio phoning home
gradio.utils.version_check = lambda: None
gradio.utils.get_local_ip_address = lambda: '127.0.0.1'
def gr_show(visible=True):
return {"visible": visible, "__type__": "update"}
sample_img2img = "assets/stable-samples/img2img/sketch-mountains-input.jpg"
sample_img2img = sample_img2img if os.path.exists(sample_img2img) else None
css_hide_progressbar = """
.wrap .m-12 svg { display:none!important; }
.wrap .m-12::before { content:"Loading..." }
.progress-bar { display:none!important; }
.meta-text { display:none!important; }
"""
# Using constants for these since the variation selector isn't visible.
# Important that they exactly match script.js for tooltip to work.
random_symbol = '\U0001f3b2\ufe0f' # 🎲️
reuse_symbol = '\u267b\ufe0f' # ♻️
art_symbol = '\U0001f3a8' # 🎨
paste_symbol = '\u2199\ufe0f' # ↙
def plaintext_to_html(text):
text = "<p>" + "<br>\n".join([f"{html.escape(x)}" for x in text.split('\n')]) + "</p>"
return text
def image_from_url_text(filedata):
if type(filedata) == list:
if len(filedata) == 0:
return None
filedata = filedata[0]
if filedata.startswith("data:image/png;base64,"):
filedata = filedata[len("data:image/png;base64,"):]
filedata = base64.decodebytes(filedata.encode('utf-8'))
image = Image.open(io.BytesIO(filedata))
return image
def send_gradio_gallery_to_image(x):
if len(x) == 0:
return None
return image_from_url_text(x[0])
def save_files(js_data, images, index):
import csv
os.makedirs(opts.outdir_save, exist_ok=True)
filenames = []
data = json.loads(js_data)
if index > -1 and opts.save_selected_only and (index >= data["index_of_first_image"]): # ensures we are looking at a specific non-grid picture, and we have save_selected_only
images = [images[index]]
infotexts = [data["infotexts"][index]]
else:
infotexts = data["infotexts"]
with open(os.path.join(opts.outdir_save, "log.csv"), "a", encoding="utf8", newline='') as file:
at_start = file.tell() == 0
writer = csv.writer(file)
if at_start:
writer.writerow(["prompt", "seed", "width", "height", "sampler", "cfgs", "steps", "filename", "negative_prompt"])
filename_base = str(int(time.time() * 1000))
extension = opts.samples_format.lower()
for i, filedata in enumerate(images):
filename = filename_base + ("" if len(images) == 1 else "-" + str(i + 1)) + f".{extension}"
filepath = os.path.join(opts.outdir_save, filename)
if filedata.startswith("data:image/png;base64,"):
filedata = filedata[len("data:image/png;base64,"):]
image = Image.open(io.BytesIO(base64.decodebytes(filedata.encode('utf-8'))))
if opts.enable_pnginfo and extension == 'png':
pnginfo = PngImagePlugin.PngInfo()
pnginfo.add_text('parameters', infotexts[i])
image.save(filepath, pnginfo=pnginfo)
else:
image.save(filepath, quality=opts.jpeg_quality)
if opts.enable_pnginfo and extension in ("jpg", "jpeg", "webp"):
piexif.insert(piexif.dump({"Exif": {
piexif.ExifIFD.UserComment: piexif.helper.UserComment.dump(infotexts[i], encoding="unicode")
}}), filepath)
filenames.append(filename)
writer.writerow([data["prompt"], data["seed"], data["width"], data["height"], data["sampler"], data["cfg_scale"], data["steps"], filenames[0], data["negative_prompt"]])
return '', '', plaintext_to_html(f"Saved: {filenames[0]}")
def wrap_gradio_call(func):
def f(*args, **kwargs):
run_memmon = opts.memmon_poll_rate > 0 and not shared.mem_mon.disabled
if run_memmon:
shared.mem_mon.monitor()
t = time.perf_counter()
try:
res = list(func(*args, **kwargs))
except Exception as e:
print("Error completing request", file=sys.stderr)
print("Arguments:", args, kwargs, file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
shared.state.job = ""
shared.state.job_count = 0
res = [None, '', f"<div class='error'>{plaintext_to_html(type(e).__name__+': '+str(e))}</div>"]
elapsed = time.perf_counter() - t
if run_memmon:
mem_stats = {k: -(v//-(1024*1024)) for k, v in shared.mem_mon.stop().items()}
active_peak = mem_stats['active_peak']
reserved_peak = mem_stats['reserved_peak']
sys_peak = mem_stats['system_peak']
sys_total = mem_stats['total']
sys_pct = round(sys_peak/max(sys_total, 1) * 100, 2)
vram_html = f"<p class='vram'>Torch active/reserved: {active_peak}/{reserved_peak} MiB, <wbr>Sys VRAM: {sys_peak}/{sys_total} MiB ({sys_pct}%)</p>"
else:
vram_html = ''
# last item is always HTML
res[-1] += f"<div class='performance'><p class='time'>Time taken: <wbr>{elapsed:.2f}s</p>{vram_html}</div>"
shared.state.interrupted = False
return tuple(res)
return f
def check_progress_call(id_part):
if shared.state.job_count == 0:
return "", gr_show(False), gr_show(False)
progress = 0
if shared.state.job_count > 0:
progress += shared.state.job_no / shared.state.job_count
if shared.state.sampling_steps > 0:
progress += 1 / shared.state.job_count * shared.state.sampling_step / shared.state.sampling_steps
progress = min(progress, 1)
progressbar = ""
if opts.show_progressbar:
progressbar = f"""<div class='progressDiv'><div class='progress' style="width:{progress * 100}%">{str(int(progress*100))+"%" if progress > 0.01 else ""}</div></div>"""
image = gr_show(False)
preview_visibility = gr_show(False)
if opts.show_progress_every_n_steps > 0:
if shared.parallel_processing_allowed:
if shared.state.sampling_step - shared.state.current_image_sampling_step >= opts.show_progress_every_n_steps and shared.state.current_latent is not None:
shared.state.current_image = modules.sd_samplers.sample_to_image(shared.state.current_latent)
shared.state.current_image_sampling_step = shared.state.sampling_step
image = shared.state.current_image
if image is None:
image = gr.update(value=None)
else:
preview_visibility = gr_show(True)
return f"<span id='{id_part}_progress_span' style='display: none'>{time.time()}</span><p>{progressbar}</p>", preview_visibility, image
def check_progress_call_initial(id_part):
shared.state.job_count = -1
shared.state.current_latent = None
shared.state.current_image = None
return check_progress_call(id_part)
def roll_artist(prompt):
allowed_cats = set([x for x in shared.artist_db.categories() if len(opts.random_artist_categories)==0 or x in opts.random_artist_categories])
artist = random.choice([x for x in shared.artist_db.artists if x.category in allowed_cats])
return prompt + ", " + artist.name if prompt != '' else artist.name
def visit(x, func, path=""):
if hasattr(x, 'children'):
for c in x.children:
visit(c, func, path)
elif x.label is not None:
func(path + "/" + str(x.label), x)
def add_style(name: str, prompt: str, negative_prompt: str):
if name is None:
return [gr_show(), gr_show()]
style = modules.styles.PromptStyle(name, prompt, negative_prompt)
shared.prompt_styles.styles[style.name] = style
# Save all loaded prompt styles: this allows us to update the storage format in the future more easily, because we
# reserialize all styles every time we save them
shared.prompt_styles.save_styles(shared.styles_filename)
return [gr.Dropdown.update(visible=True, choices=list(shared.prompt_styles.styles)) for _ in range(4)]
def apply_styles(prompt, prompt_neg, style1_name, style2_name):
prompt = shared.prompt_styles.apply_styles_to_prompt(prompt, [style1_name, style2_name])
prompt_neg = shared.prompt_styles.apply_negative_styles_to_prompt(prompt_neg, [style1_name, style2_name])
return [gr.Textbox.update(value=prompt), gr.Textbox.update(value=prompt_neg), gr.Dropdown.update(value="None"), gr.Dropdown.update(value="None")]
def interrogate(image):
prompt = shared.interrogator.interrogate(image)
return gr_show(True) if prompt is None else prompt
def create_seed_inputs():
with gr.Row():
with gr.Box():
with gr.Row(elem_id='seed_row'):
seed = (gr.Textbox if cmd_opts.use_textbox_seed else gr.Number)(label='Seed', value=-1)
seed.style(container=False)
random_seed = gr.Button(random_symbol, elem_id='random_seed')
reuse_seed = gr.Button(reuse_symbol, elem_id='reuse_seed')
with gr.Box(elem_id='subseed_show_box'):
seed_checkbox = gr.Checkbox(label='Extra', elem_id='subseed_show', value=False)
# Components to show/hide based on the 'Extra' checkbox
seed_extras = []
with gr.Row(visible=False) as seed_extra_row_1:
seed_extras.append(seed_extra_row_1)
with gr.Box():
with gr.Row(elem_id='subseed_row'):
subseed = gr.Number(label='Variation seed', value=-1)
subseed.style(container=False)
random_subseed = gr.Button(random_symbol, elem_id='random_subseed')
reuse_subseed = gr.Button(reuse_symbol, elem_id='reuse_subseed')
subseed_strength = gr.Slider(label='Variation strength', value=0.0, minimum=0, maximum=1, step=0.01)
with gr.Row(visible=False) as seed_extra_row_2:
seed_extras.append(seed_extra_row_2)
seed_resize_from_w = gr.Slider(minimum=0, maximum=2048, step=64, label="Resize seed from width", value=0)
seed_resize_from_h = gr.Slider(minimum=0, maximum=2048, step=64, label="Resize seed from height", value=0)
random_seed.click(fn=lambda: -1, show_progress=False, inputs=[], outputs=[seed])
random_subseed.click(fn=lambda: -1, show_progress=False, inputs=[], outputs=[subseed])
def change_visibility(show):
return {comp: gr_show(show) for comp in seed_extras}
seed_checkbox.change(change_visibility, show_progress=False, inputs=[seed_checkbox], outputs=seed_extras)
return seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox
def connect_reuse_seed(seed: gr.Number, reuse_seed: gr.Button, generation_info: gr.Textbox, dummy_component, is_subseed):
""" Connects a 'reuse (sub)seed' button's click event so that it copies last used
(sub)seed value from generation info to the seed field. If copying subseed and subseed strength
was 0, i.e. no variation seed was used, it copies the normal seed value instead."""
def copy_seed(gen_info_string: str, index):
res = -1
try:
gen_info = json.loads(gen_info_string)
index -= gen_info.get('index_of_first_image', 0)
if is_subseed and gen_info.get('subseed_strength', 0) > 0:
all_subseeds = gen_info.get('all_subseeds', [-1])
res = all_subseeds[index if 0 <= index < len(all_subseeds) else 0]
else:
all_seeds = gen_info.get('all_seeds', [-1])
res = all_seeds[index if 0 <= index < len(all_seeds) else 0]
except json.decoder.JSONDecodeError as e:
if gen_info_string != '':
print("Error parsing JSON generation info:", file=sys.stderr)
print(gen_info_string, file=sys.stderr)
return [res, gr_show(False)]
reuse_seed.click(
fn=copy_seed,
_js="(x, y) => [x, selected_gallery_index()]",
show_progress=False,
inputs=[generation_info, dummy_component],
outputs=[seed, dummy_component]
)
def update_token_counter(text):
tokens, token_count, max_length = model_hijack.tokenize(text)
style_class = ' class="red"' if (token_count > max_length) else ""
return f"<span {style_class}>{token_count}/{max_length}</span>"
def create_toprow(is_img2img):
id_part = "img2img" if is_img2img else "txt2img"
with gr.Row(elem_id="toprow"):
with gr.Column(scale=4):
with gr.Row():
with gr.Column(scale=80):
with gr.Row():
prompt = gr.Textbox(label="Prompt", elem_id=f"{id_part}_prompt", show_label=False, placeholder="Prompt", lines=2)
with gr.Column(scale=1, elem_id="roll_col"):
roll = gr.Button(value=art_symbol, elem_id="roll", visible=len(shared.artist_db.artists) > 0)
paste = gr.Button(value=paste_symbol, elem_id="paste")
token_counter = gr.HTML(value="<span></span>", elem_id=f"{id_part}_token_counter")
hidden_button = gr.Button(visible=False, elem_id=f"{id_part}_token_button")
hidden_button.click(fn=update_token_counter, inputs=[prompt], outputs=[token_counter])
with gr.Column(scale=10, elem_id="style_pos_col"):
prompt_style = gr.Dropdown(label="Style 1", elem_id=f"{id_part}_style_index", choices=[k for k, v in shared.prompt_styles.styles.items()], value=next(iter(shared.prompt_styles.styles.keys())), visible=len(shared.prompt_styles.styles) > 1)
with gr.Row():
with gr.Column(scale=8):
negative_prompt = gr.Textbox(label="Negative prompt", elem_id="negative_prompt", show_label=False, placeholder="Negative prompt", lines=2)
with gr.Column(scale=1, elem_id="style_neg_col"):
prompt_style2 = gr.Dropdown(label="Style 2", elem_id=f"{id_part}_style2_index", choices=[k for k, v in shared.prompt_styles.styles.items()], value=next(iter(shared.prompt_styles.styles.keys())), visible=len(shared.prompt_styles.styles) > 1)
with gr.Column(scale=1):
with gr.Row():
interrupt = gr.Button('Interrupt', elem_id=f"{id_part}_interrupt")
submit = gr.Button('Generate', elem_id="generate", variant='primary')
interrupt.click(
fn=lambda: shared.state.interrupt(),
inputs=[],
outputs=[],
)
with gr.Row():
if is_img2img:
interrogate = gr.Button('Interrogate', elem_id="interrogate")
else:
interrogate = None
prompt_style_apply = gr.Button('Apply style', elem_id="style_apply")
save_style = gr.Button('Create style', elem_id="style_create")
return prompt, roll, prompt_style, negative_prompt, prompt_style2, submit, interrogate, prompt_style_apply, save_style, paste
def setup_progressbar(progressbar, preview, id_part):
check_progress = gr.Button('Check progress', elem_id=f"{id_part}_check_progress", visible=False)
check_progress.click(
fn=lambda: check_progress_call(id_part),
show_progress=False,
inputs=[],
outputs=[progressbar, preview, preview],
)
check_progress_initial = gr.Button('Check progress (first)', elem_id=f"{id_part}_check_progress_initial", visible=False)
check_progress_initial.click(
fn=lambda: check_progress_call_initial(id_part),
show_progress=False,
inputs=[],
outputs=[progressbar, preview, preview],
)
def create_ui(txt2img, img2img, run_extras, run_pnginfo, run_modelmerger):
with gr.Blocks(analytics_enabled=False) as txt2img_interface:
txt2img_prompt, roll, txt2img_prompt_style, txt2img_negative_prompt, txt2img_prompt_style2, submit, _, txt2img_prompt_style_apply, txt2img_save_style, paste = create_toprow(is_img2img=False)
dummy_component = gr.Label(visible=False)
with gr.Row(elem_id='txt2img_progress_row'):
with gr.Column(scale=1):
pass
with gr.Column(scale=1):
progressbar = gr.HTML(elem_id="txt2img_progressbar")
txt2img_preview = gr.Image(elem_id='txt2img_preview', visible=False)
setup_progressbar(progressbar, txt2img_preview, 'txt2img')
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
steps = gr.Slider(minimum=1, maximum=150, step=1, label="Sampling Steps", value=20)
sampler_index = gr.Radio(label='Sampling method', elem_id="txt2img_sampling", choices=[x.name for x in samplers], value=samplers[0].name, type="index")
with gr.Group():
width = gr.Slider(minimum=64, maximum=2048, step=64, label="Width", value=512)
height = gr.Slider(minimum=64, maximum=2048, step=64, label="Height", value=512)
with gr.Row():
restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1)
tiling = gr.Checkbox(label='Tiling', value=False)
enable_hr = gr.Checkbox(label='Highres. fix', value=False)
with gr.Row(visible=False) as hr_options:
scale_latent = gr.Checkbox(label='Scale latent', value=False)
denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7)
with gr.Row():
batch_count = gr.Slider(minimum=1, maximum=cmd_opts.max_batch_count, step=1, label='Batch count', value=1)
batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1)
cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0)
seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs()
with gr.Group():
custom_inputs = modules.scripts.scripts_txt2img.setup_ui(is_img2img=False)
with gr.Column(variant='panel'):
with gr.Group():
txt2img_preview = gr.Image(elem_id='txt2img_preview', visible=False)
txt2img_gallery = gr.Gallery(label='Output', show_label=False, elem_id='txt2img_gallery').style(grid=4)
with gr.Group():
with gr.Row():
save = gr.Button('Save')
send_to_img2img = gr.Button('Send to img2img')
send_to_inpaint = gr.Button('Send to inpaint')
send_to_extras = gr.Button('Send to extras')
with gr.Group():
html_info = gr.HTML()
generation_info = gr.Textbox(visible=False)
connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False)
connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True)
txt2img_args = dict(
fn=txt2img,
_js="submit",
inputs=[
txt2img_prompt,
txt2img_negative_prompt,
txt2img_prompt_style,
txt2img_prompt_style2,
steps,
sampler_index,
restore_faces,
tiling,
batch_count,
batch_size,
cfg_scale,
seed,
subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox,
height,
width,
enable_hr,
scale_latent,
denoising_strength,
] + custom_inputs,
outputs=[
txt2img_gallery,
generation_info,
html_info
],
show_progress=False,
)
txt2img_prompt.submit(**txt2img_args)
submit.click(**txt2img_args)
enable_hr.change(
fn=lambda x: gr_show(x),
inputs=[enable_hr],
outputs=[hr_options],
)
save.click(
fn=wrap_gradio_call(save_files),
_js="(x, y, z) => [x, y, selected_gallery_index()]",
inputs=[
generation_info,
txt2img_gallery,
html_info,
],
outputs=[
html_info,
html_info,
html_info,
]
)
roll.click(
fn=roll_artist,
inputs=[
txt2img_prompt,
],
outputs=[
txt2img_prompt,
]
)
txt2img_paste_fields = [
(txt2img_prompt, "Prompt"),
(txt2img_negative_prompt, "Negative prompt"),
(steps, "Steps"),
(sampler_index, "Sampler"),
(restore_faces, "Face restoration"),
(cfg_scale, "CFG scale"),
(seed, "Seed"),
(width, "Size-1"),
(height, "Size-2"),
(batch_size, "Batch size"),
(subseed, "Variation seed"),
(subseed_strength, "Variation seed strength"),
(seed_resize_from_w, "Seed resize from-1"),
(seed_resize_from_h, "Seed resize from-2"),
(denoising_strength, "Denoising strength"),
(enable_hr, lambda d: "Denoising strength" in d),
(hr_options, lambda d: gr.Row.update(visible="Denoising strength" in d)),
]
modules.generation_parameters_copypaste.connect_paste(paste, txt2img_paste_fields, txt2img_prompt)
with gr.Blocks(analytics_enabled=False) as img2img_interface:
img2img_prompt, roll, img2img_prompt_style, img2img_negative_prompt, img2img_prompt_style2, submit, img2img_interrogate, img2img_prompt_style_apply, img2img_save_style, paste = create_toprow(is_img2img=True)
with gr.Row(elem_id='img2img_progress_row'):
with gr.Column(scale=1):
pass
with gr.Column(scale=1):
progressbar = gr.HTML(elem_id="img2img_progressbar")
img2img_preview = gr.Image(elem_id='img2img_preview', visible=False)
setup_progressbar(progressbar, img2img_preview, 'img2img')
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
with gr.Tabs(elem_id="mode_img2img") as tabs_img2img_mode:
with gr.TabItem('img2img', id='img2img'):
init_img = gr.Image(label="Image for img2img", show_label=False, source="upload", interactive=True, type="pil")
with gr.TabItem('Inpaint', id='inpaint'):
init_img_with_mask = gr.Image(label="Image for inpainting with mask", show_label=False, elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool="sketch", image_mode="RGBA")
init_img_inpaint = gr.Image(label="Image for img2img", show_label=False, source="upload", interactive=True, type="pil", visible=False, elem_id="img_inpaint_base")
init_mask_inpaint = gr.Image(label="Mask", source="upload", interactive=True, type="pil", visible=False, elem_id="img_inpaint_mask")
mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4)
with gr.Row():
mask_mode = gr.Radio(label="Mask mode", show_label=False, choices=["Draw mask", "Upload mask"], type="index", value="Draw mask", elem_id="mask_mode")
inpainting_mask_invert = gr.Radio(label='Masking mode', show_label=False, choices=['Inpaint masked', 'Inpaint not masked'], value='Inpaint masked', type="index")
inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='fill', type="index")
with gr.Row():
inpaint_full_res = gr.Checkbox(label='Inpaint at full resolution', value=False)
inpaint_full_res_padding = gr.Slider(label='Inpaint at full resolution padding, pixels', minimum=0, maximum=256, step=4, value=32)
with gr.TabItem('Batch img2img', id='batch'):
hidden = '<br>Disabled when launched with --hide-ui-dir-config.' if shared.cmd_opts.hide_ui_dir_config else ''
gr.HTML(f"<p class=\"text-gray-500\">Process images in a directory on the same machine where the server is running.{hidden}</p>")
img2img_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs)
img2img_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs)
with gr.Row():
resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", show_label=False, choices=["Just resize", "Crop and resize", "Resize and fill"], type="index", value="Just resize")
steps = gr.Slider(minimum=1, maximum=150, step=1, label="Sampling Steps", value=20)
sampler_index = gr.Radio(label='Sampling method', choices=[x.name for x in samplers_for_img2img], value=samplers_for_img2img[0].name, type="index")
with gr.Group():
width = gr.Slider(minimum=64, maximum=2048, step=64, label="Width", value=512)
height = gr.Slider(minimum=64, maximum=2048, step=64, label="Height", value=512)
with gr.Row():
restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1)
tiling = gr.Checkbox(label='Tiling', value=False)
with gr.Row():
batch_count = gr.Slider(minimum=1, maximum=cmd_opts.max_batch_count, step=1, label='Batch count', value=1)
batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1)
with gr.Group():
cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0)
denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75)
seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs()
with gr.Group():
custom_inputs = modules.scripts.scripts_img2img.setup_ui(is_img2img=True)
with gr.Column(variant='panel'):
with gr.Group():
img2img_preview = gr.Image(elem_id='img2img_preview', visible=False)
img2img_gallery = gr.Gallery(label='Output', show_label=False, elem_id='img2img_gallery').style(grid=4)
with gr.Group():
with gr.Row():
save = gr.Button('Save')
img2img_send_to_img2img = gr.Button('Send to img2img')
img2img_send_to_inpaint = gr.Button('Send to inpaint')
img2img_send_to_extras = gr.Button('Send to extras')
with gr.Group():
html_info = gr.HTML()
generation_info = gr.Textbox(visible=False)
connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False)
connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True)
mask_mode.change(
lambda mode, img: {
init_img_with_mask: gr_show(mode == 0),
init_img_inpaint: gr_show(mode == 1),
init_mask_inpaint: gr_show(mode == 1),
},
inputs=[mask_mode, init_img_with_mask],
outputs=[
init_img_with_mask,
init_img_inpaint,
init_mask_inpaint,
],
)
img2img_args = dict(
fn=img2img,
_js="submit_img2img",
inputs=[
dummy_component,
img2img_prompt,
img2img_negative_prompt,
img2img_prompt_style,
img2img_prompt_style2,
init_img,
init_img_with_mask,
init_img_inpaint,
init_mask_inpaint,
mask_mode,
steps,
sampler_index,
mask_blur,
inpainting_fill,
restore_faces,
tiling,
batch_count,
batch_size,
cfg_scale,
denoising_strength,
seed,
subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox,
height,
width,
resize_mode,
inpaint_full_res,
inpaint_full_res_padding,
inpainting_mask_invert,
img2img_batch_input_dir,
img2img_batch_output_dir,
] + custom_inputs,
outputs=[
img2img_gallery,
generation_info,
html_info
],
show_progress=False,
)
img2img_prompt.submit(**img2img_args)
submit.click(**img2img_args)
img2img_interrogate.click(
fn=interrogate,
inputs=[init_img],
outputs=[img2img_prompt],
)
save.click(
fn=wrap_gradio_call(save_files),
_js="(x, y, z) => [x, y, selected_gallery_index()]",
inputs=[
generation_info,
img2img_gallery,
html_info
],
outputs=[
html_info,
html_info,
html_info,
]
)
roll.click(
fn=roll_artist,
inputs=[
img2img_prompt,
],
outputs=[
img2img_prompt,
]
)
prompts = [(txt2img_prompt, txt2img_negative_prompt), (img2img_prompt, img2img_negative_prompt)]
style_dropdowns = [(txt2img_prompt_style, txt2img_prompt_style2), (img2img_prompt_style, img2img_prompt_style2)]
for button, (prompt, negative_prompt) in zip([txt2img_save_style, img2img_save_style], prompts):
button.click(
fn=add_style,
_js="ask_for_style_name",
# Have to pass empty dummy component here, because the JavaScript and Python function have to accept
# the same number of parameters, but we only know the style-name after the JavaScript prompt
inputs=[dummy_component, prompt, negative_prompt],
outputs=[txt2img_prompt_style, img2img_prompt_style, txt2img_prompt_style2, img2img_prompt_style2],
)
for button, (prompt, negative_prompt), (style1, style2) in zip([txt2img_prompt_style_apply, img2img_prompt_style_apply], prompts, style_dropdowns):
button.click(
fn=apply_styles,
inputs=[prompt, negative_prompt, style1, style2],
outputs=[prompt, negative_prompt, style1, style2],
)
img2img_paste_fields = [
(img2img_prompt, "Prompt"),
(img2img_negative_prompt, "Negative prompt"),
(steps, "Steps"),
(sampler_index, "Sampler"),
(restore_faces, "Face restoration"),
(cfg_scale, "CFG scale"),
(seed, "Seed"),
(width, "Size-1"),
(height, "Size-2"),
(batch_size, "Batch size"),
(subseed, "Variation seed"),
(subseed_strength, "Variation seed strength"),
(seed_resize_from_w, "Seed resize from-1"),
(seed_resize_from_h, "Seed resize from-2"),
(denoising_strength, "Denoising strength"),
]
modules.generation_parameters_copypaste.connect_paste(paste, img2img_paste_fields, img2img_prompt)
with gr.Blocks(analytics_enabled=False) as extras_interface:
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
with gr.Tabs(elem_id="mode_extras"):
with gr.TabItem('Single Image'):
extras_image = gr.Image(label="Source", source="upload", interactive=True, type="pil")
with gr.TabItem('Batch Process'):
image_batch = gr.File(label="Batch Process", file_count="multiple", interactive=True, type="file")
upscaling_resize = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Resize", value=2)
with gr.Group():
extras_upscaler_1 = gr.Radio(label='Upscaler 1', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index")
with gr.Group():
extras_upscaler_2 = gr.Radio(label='Upscaler 2', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index")
extras_upscaler_2_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="Upscaler 2 visibility", value=1)
with gr.Group():
gfpgan_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="GFPGAN visibility", value=0, interactive=modules.gfpgan_model.have_gfpgan)
with gr.Group():
codeformer_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="CodeFormer visibility", value=0, interactive=modules.codeformer_model.have_codeformer)
codeformer_weight = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="CodeFormer weight (0 = maximum effect, 1 = minimum effect)", value=0, interactive=modules.codeformer_model.have_codeformer)
submit = gr.Button('Generate', elem_id="extras_generate", variant='primary')
with gr.Column(variant='panel'):
result_images = gr.Gallery(label="Result", show_label=False)
html_info_x = gr.HTML()
html_info = gr.HTML()
extras_send_to_img2img = gr.Button('Send to img2img')
extras_send_to_inpaint = gr.Button('Send to inpaint')
submit.click(
fn=run_extras,
_js="get_extras_tab_index",
inputs=[
dummy_component,
extras_image,
image_batch,
gfpgan_visibility,
codeformer_visibility,
codeformer_weight,
upscaling_resize,
extras_upscaler_1,
extras_upscaler_2,
extras_upscaler_2_visibility,
],
outputs=[
result_images,
html_info_x,
html_info,
]
)
extras_send_to_img2img.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_img2img",
inputs=[result_images],
outputs=[init_img],
)
extras_send_to_inpaint.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_img2img",
inputs=[result_images],
outputs=[init_img_with_mask],
)
with gr.Blocks(analytics_enabled=False) as pnginfo_interface:
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
image = gr.Image(elem_id="pnginfo_image", label="Source", source="upload", interactive=True, type="pil")
with gr.Column(variant='panel'):
html = gr.HTML()
generation_info = gr.Textbox(visible=False)
html2 = gr.HTML()
with gr.Row():
pnginfo_send_to_txt2img = gr.Button('Send to txt2img')
pnginfo_send_to_img2img = gr.Button('Send to img2img')
image.change(
fn=wrap_gradio_call(run_pnginfo),
inputs=[image],
outputs=[html, generation_info, html2],
)
with gr.Blocks() as modelmerger_interface:
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
gr.HTML(value="<p>A merger of the two checkpoints will be generated in your <b>checkpoint</b> directory.</p>")
with gr.Row():
primary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_primary_model_name", label="Primary Model Name")
secondary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_secondary_model_name", label="Secondary Model Name")
interp_amount = gr.Slider(minimum=0.0, maximum=1.0, step=0.05, label='Interpolation Amount', value=0.3)
interp_method = gr.Radio(choices=["Weighted Sum", "Sigmoid", "Inverse Sigmoid"], value="Weighted Sum", label="Interpolation Method")
save_as_half = gr.Checkbox(value=False, label="Safe as float16")
modelmerger_merge = gr.Button(elem_id="modelmerger_merge", label="Merge", variant='primary')
with gr.Column(variant='panel'):
submit_result = gr.Textbox(elem_id="modelmerger_result", show_label=False)
def create_setting_component(key):
def fun():
return opts.data[key] if key in opts.data else opts.data_labels[key].default
info = opts.data_labels[key]
t = type(info.default)
args = info.component_args() if callable(info.component_args) else info.component_args
if info.component is not None:
comp = info.component
elif t == str:
comp = gr.Textbox
elif t == int:
comp = gr.Number
elif t == bool:
comp = gr.Checkbox
else:
raise Exception(f'bad options item type: {str(t)} for key {key}')
return comp(label=info.label, value=fun, **(args or {}))
components = []
component_dict = {}
def run_settings(*args):
changed = 0
for key, value, comp in zip(opts.data_labels.keys(), args, components):
if not opts.same_type(value, opts.data_labels[key].default):
return f"Bad value for setting {key}: {value}; expecting {type(opts.data_labels[key].default).__name__}"
for key, value, comp in zip(opts.data_labels.keys(), args, components):
comp_args = opts.data_labels[key].component_args
if comp_args and isinstance(comp_args, dict) and comp_args.get('visible') is False:
continue
oldval = opts.data.get(key, None)
opts.data[key] = value
if oldval != value:
if opts.data_labels[key].onchange is not None:
opts.data_labels[key].onchange()
changed += 1
opts.save(shared.config_filename)
return f'{changed} settings changed.', opts.dumpjson()
with gr.Blocks(analytics_enabled=False) as settings_interface:
settings_submit = gr.Button(value="Apply settings", variant='primary')
result = gr.HTML()
settings_cols = 3
items_per_col = int(len(opts.data_labels) * 0.9 / settings_cols)
cols_displayed = 0
items_displayed = 0
previous_section = None
column = None
with gr.Row(elem_id="settings").style(equal_height=False):
for i, (k, item) in enumerate(opts.data_labels.items()):
if previous_section != item.section:
if cols_displayed < settings_cols and (items_displayed >= items_per_col or previous_section is None):
if column is not None:
column.__exit__()
column = gr.Column(variant='panel')
column.__enter__()
items_displayed = 0
cols_displayed += 1
previous_section = item.section
gr.HTML(elem_id="settings_header_text_{}".format(item.section[0]), value='<h1 class="gr-button-lg">{}</h1>'.format(item.section[1]))
component = create_setting_component(k)
component_dict[k] = component
components.append(component)
items_displayed += 1
request_notifications = gr.Button(value='Request browser notifications', elem_id="request_notifications")
request_notifications.click(
fn=lambda: None,
inputs=[],
outputs=[],
_js='function(){}'
)
if column is not None:
column.__exit__()
interfaces = [
(txt2img_interface, "txt2img", "txt2img"),
(img2img_interface, "img2img", "img2img"),
(extras_interface, "Extras", "extras"),
(pnginfo_interface, "PNG Info", "pnginfo"),
(modelmerger_interface, "Checkpoint Merger", "modelmerger"),
(settings_interface, "Settings", "settings"),
]
with open(os.path.join(script_path, "style.css"), "r", encoding="utf8") as file:
css = file.read()
if os.path.exists(os.path.join(script_path, "user.css")):
with open(os.path.join(script_path, "user.css"), "r", encoding="utf8") as file:
usercss = file.read()
css += usercss
if not cmd_opts.no_progressbar_hiding:
css += css_hide_progressbar
with gr.Blocks(css=css, analytics_enabled=False, title="Stable Diffusion") as demo:
with gr.Tabs() as tabs:
for interface, label, ifid in interfaces:
with gr.TabItem(label, id=ifid):
interface.render()
if os.path.exists(os.path.join(script_path, "notification.mp3")):
audio_notification = gr.Audio(interactive=False, value=os.path.join(script_path, "notification.mp3"), elem_id="audio_notification", visible=False)
text_settings = gr.Textbox(elem_id="settings_json", value=lambda: opts.dumpjson(), visible=False)
settings_submit.click(
fn=run_settings,
inputs=components,
outputs=[result, text_settings],
)
modelmerger_merge.click(
fn=run_modelmerger,
inputs=[
primary_model_name,
secondary_model_name,
interp_method,
interp_amount,
save_as_half,
],
outputs=[
submit_result,
primary_model_name,
secondary_model_name,
component_dict['sd_model_checkpoint'],
]
)
paste_field_names = ['Prompt', 'Negative prompt', 'Steps', 'Face restoration', 'Seed', 'Size-1', 'Size-2']
txt2img_fields = [field for field,name in txt2img_paste_fields if name in paste_field_names]
img2img_fields = [field for field,name in img2img_paste_fields if name in paste_field_names]
send_to_img2img.click(
fn=lambda img, *args: (image_from_url_text(img),*args),
_js="(gallery, ...args) => [extract_image_from_gallery_img2img(gallery), ...args]",
inputs=[txt2img_gallery] + txt2img_fields,
outputs=[init_img] + img2img_fields,
)
send_to_inpaint.click(
fn=lambda x, *args: (image_from_url_text(x), *args),
_js="(gallery, ...args) => [extract_image_from_gallery_inpaint(gallery), ...args]",
inputs=[txt2img_gallery] + txt2img_fields,
outputs=[init_img_with_mask] + img2img_fields,
)
img2img_send_to_img2img.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_img2img",
inputs=[img2img_gallery],
outputs=[init_img],
)
img2img_send_to_inpaint.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_inpaint",
inputs=[img2img_gallery],
outputs=[init_img_with_mask],
)
send_to_extras.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_extras",
inputs=[txt2img_gallery],
outputs=[extras_image],
)
img2img_send_to_extras.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_extras",
inputs=[img2img_gallery],
outputs=[extras_image],
)
modules.generation_parameters_copypaste.connect_paste(pnginfo_send_to_txt2img, txt2img_paste_fields, generation_info, 'switch_to_txt2img')
modules.generation_parameters_copypaste.connect_paste(pnginfo_send_to_img2img, img2img_paste_fields, generation_info, 'switch_to_img2img_img2img')
ui_config_file = cmd_opts.ui_config_file
ui_settings = {}
settings_count = len(ui_settings)
error_loading = False
try:
if os.path.exists(ui_config_file):
with open(ui_config_file, "r", encoding="utf8") as file:
ui_settings = json.load(file)
except Exception:
error_loading = True
print("Error loading settings:", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
def loadsave(path, x):
def apply_field(obj, field, condition=None):
key = path + "/" + field
if getattr(obj,'custom_script_source',None) is not None:
key = 'customscript/' + obj.custom_script_source + '/' + key
if getattr(obj, 'do_not_save_to_config', False):
return
saved_value = ui_settings.get(key, None)
if saved_value is None:
ui_settings[key] = getattr(obj, field)
elif condition is None or condition(saved_value):
setattr(obj, field, saved_value)
if type(x) in [gr.Slider, gr.Radio, gr.Checkbox, gr.Textbox, gr.Number] and x.visible:
apply_field(x, 'visible')
if type(x) == gr.Slider:
apply_field(x, 'value')
apply_field(x, 'minimum')
apply_field(x, 'maximum')
apply_field(x, 'step')
if type(x) == gr.Radio:
apply_field(x, 'value', lambda val: val in x.choices)
if type(x) == gr.Checkbox:
apply_field(x, 'value')
if type(x) == gr.Textbox:
apply_field(x, 'value')
if type(x) == gr.Number:
apply_field(x, 'value')
visit(txt2img_interface, loadsave, "txt2img")
visit(img2img_interface, loadsave, "img2img")
visit(extras_interface, loadsave, "extras")
if not error_loading and (not os.path.exists(ui_config_file) or settings_count != len(ui_settings)):
with open(ui_config_file, "w", encoding="utf8") as file:
json.dump(ui_settings, file, indent=4)
return demo
with open(os.path.join(script_path, "script.js"), "r", encoding="utf8") as jsfile:
javascript = f'<script>{jsfile.read()}</script>'
jsdir = os.path.join(script_path, "javascript")
for filename in sorted(os.listdir(jsdir)):
with open(os.path.join(jsdir, filename), "r", encoding="utf8") as jsfile:
javascript += f"\n<script>{jsfile.read()}</script>"
def template_response(*args, **kwargs):
res = gradio_routes_templates_response(*args, **kwargs)
res.body = res.body.replace(b'</head>', f'{javascript}</head>'.encode("utf8"))
res.init_headers()
return res
gradio_routes_templates_response = gradio.routes.templates.TemplateResponse
gradio.routes.templates.TemplateResponse = template_response
<|code_end|>
|
New samplers are not showing up
I just updated my install to try out the new samplers, but they are not showing up. I deleted repositories/k-diffusion as a test, but they still don't show up.
Someone on reddit suggested running "source venv/bin/activate/" and then "pip uninstall k-diffusion", but I have no idea what that does.
How can I get the new samplers to show up in the UI?
Edit: they don't show up in the img2img tab.
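For context, the sampler dropdowns are built from modules/sd_samplers.py (shown below): a k-diffusion sampler is only registered when the installed k_diffusion module actually exposes its sampling function, and the img2img tab additionally filters out 'PLMS', 'DPM fast' and 'DPM adaptive'. A minimal sketch of that filtering, with a simplified stand-in for SamplerData (illustration only, not the webui code itself):
<|code_start|># Minimal sketch of how the sampler dropdowns are populated; SamplerData is
# simplified here, and the names mirror the list in modules/sd_samplers.py below.
from collections import namedtuple

SamplerData = namedtuple('SamplerData', ['name'])

# Full list, as the txt2img dropdown sees it. In the real module a k-diffusion
# entry is only added if hasattr(k_diffusion.sampling, funcname) is true, so an
# outdated k-diffusion checkout silently drops the new samplers.
samplers = [SamplerData(n) for n in [
    'Euler a', 'Euler', 'LMS', 'Heun', 'DPM2', 'DPM2 a',
    'DPM fast', 'DPM adaptive', 'DDIM', 'PLMS',
]]

# img2img deliberately hides PLMS and the two new DPM samplers.
samplers_for_img2img = [x for x in samplers
                        if x.name not in ['PLMS', 'DPM fast', 'DPM adaptive']]

print([x.name for x in samplers_for_img2img])
# -> ['Euler a', 'Euler', 'LMS', 'Heun', 'DPM2', 'DPM2 a', 'DDIM']
<|code_end|>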
|
modules/sd_samplers.py
<|code_start|>from collections import namedtuple
import numpy as np
import torch
import tqdm
from PIL import Image
import inspect
import k_diffusion.sampling
import ldm.models.diffusion.ddim
import ldm.models.diffusion.plms
from modules import prompt_parser
from modules.shared import opts, cmd_opts, state
import modules.shared as shared
SamplerData = namedtuple('SamplerData', ['name', 'constructor', 'aliases'])
samplers_k_diffusion = [
('Euler a', 'sample_euler_ancestral', ['k_euler_a']),
('Euler', 'sample_euler', ['k_euler']),
('LMS', 'sample_lms', ['k_lms']),
('Heun', 'sample_heun', ['k_heun']),
('DPM2', 'sample_dpm_2', ['k_dpm_2']),
('DPM2 a', 'sample_dpm_2_ancestral', ['k_dpm_2_a']),
('DPM fast', 'sample_dpm_fast', ['k_dpm_fast']),
('DPM adaptive', 'sample_dpm_adaptive', ['k_dpm_ad']),
]
samplers_data_k_diffusion = [
SamplerData(label, lambda model, funcname=funcname: KDiffusionSampler(funcname, model), aliases)
for label, funcname, aliases in samplers_k_diffusion
if hasattr(k_diffusion.sampling, funcname)
]
samplers = [
*samplers_data_k_diffusion,
SamplerData('DDIM', lambda model: VanillaStableDiffusionSampler(ldm.models.diffusion.ddim.DDIMSampler, model), []),
SamplerData('PLMS', lambda model: VanillaStableDiffusionSampler(ldm.models.diffusion.plms.PLMSSampler, model), []),
]
samplers_for_img2img = [x for x in samplers if x.name not in ['PLMS', 'DPM fast', 'DPM adaptive']]
sampler_extra_params = {
'sample_euler': ['s_churn', 's_tmin', 's_tmax', 's_noise'],
'sample_heun': ['s_churn', 's_tmin', 's_tmax', 's_noise'],
'sample_dpm_2': ['s_churn', 's_tmin', 's_tmax', 's_noise'],
}
def setup_img2img_steps(p, steps=None):
if opts.img2img_fix_steps or steps is not None:
steps = int((steps or p.steps) / min(p.denoising_strength, 0.999)) if p.denoising_strength > 0 else 0
t_enc = p.steps - 1
else:
steps = p.steps
t_enc = int(min(p.denoising_strength, 0.999) * steps)
return steps, t_enc
def sample_to_image(samples):
x_sample = shared.sd_model.decode_first_stage(samples[0:1].type(shared.sd_model.dtype))[0]
x_sample = torch.clamp((x_sample + 1.0) / 2.0, min=0.0, max=1.0)
x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2)
x_sample = x_sample.astype(np.uint8)
return Image.fromarray(x_sample)
def store_latent(decoded):
state.current_latent = decoded
if opts.show_progress_every_n_steps > 0 and shared.state.sampling_step % opts.show_progress_every_n_steps == 0:
if not shared.parallel_processing_allowed:
shared.state.current_image = sample_to_image(decoded)
def extended_tdqm(sequence, *args, desc=None, **kwargs):
state.sampling_steps = len(sequence)
state.sampling_step = 0
for x in tqdm.tqdm(sequence, *args, desc=state.job, file=shared.progress_print_out, **kwargs):
if state.interrupted:
break
yield x
state.sampling_step += 1
shared.total_tqdm.update()
ldm.models.diffusion.ddim.tqdm = lambda *args, desc=None, **kwargs: extended_tdqm(*args, desc=desc, **kwargs)
ldm.models.diffusion.plms.tqdm = lambda *args, desc=None, **kwargs: extended_tdqm(*args, desc=desc, **kwargs)
class VanillaStableDiffusionSampler:
def __init__(self, constructor, sd_model):
self.sampler = constructor(sd_model)
self.orig_p_sample_ddim = self.sampler.p_sample_ddim if hasattr(self.sampler, 'p_sample_ddim') else self.sampler.p_sample_plms
self.mask = None
self.nmask = None
self.init_latent = None
self.sampler_noises = None
self.step = 0
self.eta = None
self.default_eta = 0.0
def number_of_needed_noises(self, p):
return 0
def p_sample_ddim_hook(self, x_dec, cond, ts, unconditional_conditioning, *args, **kwargs):
cond = prompt_parser.reconstruct_cond_batch(cond, self.step)
unconditional_conditioning = prompt_parser.reconstruct_cond_batch(unconditional_conditioning, self.step)
if self.mask is not None:
img_orig = self.sampler.model.q_sample(self.init_latent, ts)
x_dec = img_orig * self.mask + self.nmask * x_dec
res = self.orig_p_sample_ddim(x_dec, cond, ts, unconditional_conditioning=unconditional_conditioning, *args, **kwargs)
if self.mask is not None:
store_latent(self.init_latent * self.mask + self.nmask * res[1])
else:
store_latent(res[1])
self.step += 1
return res
def initialize(self, p):
self.eta = p.eta or opts.eta_ddim
for fieldname in ['p_sample_ddim', 'p_sample_plms']:
if hasattr(self.sampler, fieldname):
setattr(self.sampler, fieldname, self.p_sample_ddim_hook)
self.mask = p.mask if hasattr(p, 'mask') else None
self.nmask = p.nmask if hasattr(p, 'nmask') else None
def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning, steps=None):
steps, t_enc = setup_img2img_steps(p, steps)
self.initialize(p)
# existing code fails with certain step counts, like 9
try:
self.sampler.make_schedule(ddim_num_steps=steps, ddim_eta=self.eta, ddim_discretize=p.ddim_discretize, verbose=False)
except Exception:
self.sampler.make_schedule(ddim_num_steps=steps+1, ddim_eta=self.eta, ddim_discretize=p.ddim_discretize, verbose=False)
x1 = self.sampler.stochastic_encode(x, torch.tensor([t_enc] * int(x.shape[0])).to(shared.device), noise=noise)
self.init_latent = x
self.step = 0
samples = self.sampler.decode(x1, conditioning, t_enc, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning)
return samples
def sample(self, p, x, conditioning, unconditional_conditioning, steps=None):
self.initialize(p)
self.init_latent = None
self.step = 0
steps = steps or p.steps
# existing code fails with certain step counts, like 9
try:
samples_ddim, _ = self.sampler.sample(S=steps, conditioning=conditioning, batch_size=int(x.shape[0]), shape=x[0].shape, verbose=False, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning, x_T=x, eta=self.eta)
except Exception:
samples_ddim, _ = self.sampler.sample(S=steps+1, conditioning=conditioning, batch_size=int(x.shape[0]), shape=x[0].shape, verbose=False, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning, x_T=x, eta=self.eta)
return samples_ddim
class CFGDenoiser(torch.nn.Module):
def __init__(self, model):
super().__init__()
self.inner_model = model
self.mask = None
self.nmask = None
self.init_latent = None
self.step = 0
def forward(self, x, sigma, uncond, cond, cond_scale):
cond = prompt_parser.reconstruct_cond_batch(cond, self.step)
uncond = prompt_parser.reconstruct_cond_batch(uncond, self.step)
if shared.batch_cond_uncond:
x_in = torch.cat([x] * 2)
sigma_in = torch.cat([sigma] * 2)
cond_in = torch.cat([uncond, cond])
uncond, cond = self.inner_model(x_in, sigma_in, cond=cond_in).chunk(2)
denoised = uncond + (cond - uncond) * cond_scale
else:
uncond = self.inner_model(x, sigma, cond=uncond)
cond = self.inner_model(x, sigma, cond=cond)
denoised = uncond + (cond - uncond) * cond_scale
if self.mask is not None:
denoised = self.init_latent * self.mask + self.nmask * denoised
self.step += 1
return denoised
def extended_trange(sampler, count, *args, **kwargs):
state.sampling_steps = count
state.sampling_step = 0
for x in tqdm.trange(count, *args, desc=state.job, file=shared.progress_print_out, **kwargs):
if state.interrupted:
break
if sampler.stop_at is not None and x > sampler.stop_at:
break
yield x
state.sampling_step += 1
shared.total_tqdm.update()
class TorchHijack:
def __init__(self, kdiff_sampler):
self.kdiff_sampler = kdiff_sampler
def __getattr__(self, item):
if item == 'randn_like':
return self.kdiff_sampler.randn_like
if hasattr(torch, item):
return getattr(torch, item)
raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, item))
class KDiffusionSampler:
def __init__(self, funcname, sd_model):
self.model_wrap = k_diffusion.external.CompVisDenoiser(sd_model, quantize=shared.opts.enable_quantization)
self.funcname = funcname
self.func = getattr(k_diffusion.sampling, self.funcname)
self.extra_params = sampler_extra_params.get(funcname, [])
self.model_wrap_cfg = CFGDenoiser(self.model_wrap)
self.sampler_noises = None
self.sampler_noise_index = 0
self.stop_at = None
self.eta = None
self.default_eta = 1.0
def callback_state(self, d):
store_latent(d["denoised"])
def number_of_needed_noises(self, p):
return p.steps
def randn_like(self, x):
noise = self.sampler_noises[self.sampler_noise_index] if self.sampler_noises is not None and self.sampler_noise_index < len(self.sampler_noises) else None
if noise is not None and x.shape == noise.shape:
res = noise
else:
res = torch.randn_like(x)
self.sampler_noise_index += 1
return res
def initialize(self, p):
self.model_wrap_cfg.mask = p.mask if hasattr(p, 'mask') else None
self.model_wrap_cfg.nmask = p.nmask if hasattr(p, 'nmask') else None
self.model_wrap.step = 0
self.sampler_noise_index = 0
self.eta = p.eta or opts.eta_ancestral
if hasattr(k_diffusion.sampling, 'trange'):
k_diffusion.sampling.trange = lambda *args, **kwargs: extended_trange(self, *args, **kwargs)
if self.sampler_noises is not None:
k_diffusion.sampling.torch = TorchHijack(self)
extra_params_kwargs = {}
for param_name in self.extra_params:
if hasattr(p, param_name) and param_name in inspect.signature(self.func).parameters:
extra_params_kwargs[param_name] = getattr(p, param_name)
if 'eta' in inspect.signature(self.func).parameters:
extra_params_kwargs['eta'] = self.eta
return extra_params_kwargs
def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning, steps=None):
steps, t_enc = setup_img2img_steps(p, steps)
sigmas = self.model_wrap.get_sigmas(steps)
noise = noise * sigmas[steps - t_enc - 1]
xi = x + noise
extra_params_kwargs = self.initialize(p)
sigma_sched = sigmas[steps - t_enc - 1:]
self.model_wrap_cfg.init_latent = x
return self.func(self.model_wrap_cfg, xi, sigma_sched, extra_args={'cond': conditioning, 'uncond': unconditional_conditioning, 'cond_scale': p.cfg_scale}, disable=False, callback=self.callback_state, **extra_params_kwargs)
def sample(self, p, x, conditioning, unconditional_conditioning, steps=None):
steps = steps or p.steps
sigmas = self.model_wrap.get_sigmas(steps)
x = x * sigmas[0]
extra_params_kwargs = self.initialize(p)
if 'sigma_min' in inspect.signature(self.func).parameters:
extra_params_kwargs['sigma_min'] = self.model_wrap.sigmas[0].item()
extra_params_kwargs['sigma_max'] = self.model_wrap.sigmas[-1].item()
if 'n' in inspect.signature(self.func).parameters:
extra_params_kwargs['n'] = steps
else:
extra_params_kwargs['sigmas'] = sigmas
samples = self.func(self.model_wrap_cfg, x, extra_args={'cond': conditioning, 'uncond': unconditional_conditioning, 'cond_scale': p.cfg_scale}, disable=False, callback=self.callback_state, **extra_params_kwargs)
return samples
<|code_end|>
|
modules/sd_samplers.py
<|code_start|>from collections import namedtuple
import numpy as np
import torch
import tqdm
from PIL import Image
import inspect
import sys
from modules.paths import paths
sys.path.insert(0, paths["k_diffusion"])
import k_diffusion.sampling
import ldm.models.diffusion.ddim
import ldm.models.diffusion.plms
from modules import prompt_parser
from modules.shared import opts, cmd_opts, state
import modules.shared as shared
SamplerData = namedtuple('SamplerData', ['name', 'constructor', 'aliases'])
samplers_k_diffusion = [
('Euler a', 'sample_euler_ancestral', ['k_euler_a']),
('Euler', 'sample_euler', ['k_euler']),
('LMS', 'sample_lms', ['k_lms']),
('Heun', 'sample_heun', ['k_heun']),
('DPM2', 'sample_dpm_2', ['k_dpm_2']),
('DPM2 a', 'sample_dpm_2_ancestral', ['k_dpm_2_a']),
('DPM fast', 'sample_dpm_fast', ['k_dpm_fast']),
('DPM adaptive', 'sample_dpm_adaptive', ['k_dpm_ad']),
]
samplers_data_k_diffusion = [
SamplerData(label, lambda model, funcname=funcname: KDiffusionSampler(funcname, model), aliases)
for label, funcname, aliases in samplers_k_diffusion
if hasattr(k_diffusion.sampling, funcname)
]
samplers = [
*samplers_data_k_diffusion,
SamplerData('DDIM', lambda model: VanillaStableDiffusionSampler(ldm.models.diffusion.ddim.DDIMSampler, model), []),
SamplerData('PLMS', lambda model: VanillaStableDiffusionSampler(ldm.models.diffusion.plms.PLMSSampler, model), []),
]
samplers_for_img2img = [x for x in samplers if x.name not in ['PLMS', 'DPM fast', 'DPM adaptive']]
sampler_extra_params = {
'sample_euler': ['s_churn', 's_tmin', 's_tmax', 's_noise'],
'sample_heun': ['s_churn', 's_tmin', 's_tmax', 's_noise'],
'sample_dpm_2': ['s_churn', 's_tmin', 's_tmax', 's_noise'],
}
def setup_img2img_steps(p, steps=None):
if opts.img2img_fix_steps or steps is not None:
steps = int((steps or p.steps) / min(p.denoising_strength, 0.999)) if p.denoising_strength > 0 else 0
t_enc = p.steps - 1
else:
steps = p.steps
t_enc = int(min(p.denoising_strength, 0.999) * steps)
return steps, t_enc
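# example for setup_img2img_steps above (hypothetical numbers): with p.steps=20,
# denoising_strength=0.75 and img2img_fix_steps off, steps stays 20 and
# t_enc = int(0.75 * 20) = 15, so only the last 15 steps of the schedule are run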
def sample_to_image(samples):
x_sample = shared.sd_model.decode_first_stage(samples[0:1].type(shared.sd_model.dtype))[0]
x_sample = torch.clamp((x_sample + 1.0) / 2.0, min=0.0, max=1.0)
x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2)
x_sample = x_sample.astype(np.uint8)
return Image.fromarray(x_sample)
def store_latent(decoded):
state.current_latent = decoded
if opts.show_progress_every_n_steps > 0 and shared.state.sampling_step % opts.show_progress_every_n_steps == 0:
if not shared.parallel_processing_allowed:
shared.state.current_image = sample_to_image(decoded)
def extended_tdqm(sequence, *args, desc=None, **kwargs):
state.sampling_steps = len(sequence)
state.sampling_step = 0
for x in tqdm.tqdm(sequence, *args, desc=state.job, file=shared.progress_print_out, **kwargs):
if state.interrupted:
break
yield x
state.sampling_step += 1
shared.total_tqdm.update()
ldm.models.diffusion.ddim.tqdm = lambda *args, desc=None, **kwargs: extended_tdqm(*args, desc=desc, **kwargs)
ldm.models.diffusion.plms.tqdm = lambda *args, desc=None, **kwargs: extended_tdqm(*args, desc=desc, **kwargs)
class VanillaStableDiffusionSampler:
def __init__(self, constructor, sd_model):
self.sampler = constructor(sd_model)
self.orig_p_sample_ddim = self.sampler.p_sample_ddim if hasattr(self.sampler, 'p_sample_ddim') else self.sampler.p_sample_plms
self.mask = None
self.nmask = None
self.init_latent = None
self.sampler_noises = None
self.step = 0
self.eta = None
self.default_eta = 0.0
def number_of_needed_noises(self, p):
return 0
def p_sample_ddim_hook(self, x_dec, cond, ts, unconditional_conditioning, *args, **kwargs):
cond = prompt_parser.reconstruct_cond_batch(cond, self.step)
unconditional_conditioning = prompt_parser.reconstruct_cond_batch(unconditional_conditioning, self.step)
if self.mask is not None:
img_orig = self.sampler.model.q_sample(self.init_latent, ts)
x_dec = img_orig * self.mask + self.nmask * x_dec
res = self.orig_p_sample_ddim(x_dec, cond, ts, unconditional_conditioning=unconditional_conditioning, *args, **kwargs)
if self.mask is not None:
store_latent(self.init_latent * self.mask + self.nmask * res[1])
else:
store_latent(res[1])
self.step += 1
return res
def initialize(self, p):
self.eta = p.eta or opts.eta_ddim
for fieldname in ['p_sample_ddim', 'p_sample_plms']:
if hasattr(self.sampler, fieldname):
setattr(self.sampler, fieldname, self.p_sample_ddim_hook)
self.mask = p.mask if hasattr(p, 'mask') else None
self.nmask = p.nmask if hasattr(p, 'nmask') else None
def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning, steps=None):
steps, t_enc = setup_img2img_steps(p, steps)
self.initialize(p)
# existing code fails with certain step counts, like 9
try:
self.sampler.make_schedule(ddim_num_steps=steps, ddim_eta=self.eta, ddim_discretize=p.ddim_discretize, verbose=False)
except Exception:
self.sampler.make_schedule(ddim_num_steps=steps+1, ddim_eta=self.eta, ddim_discretize=p.ddim_discretize, verbose=False)
x1 = self.sampler.stochastic_encode(x, torch.tensor([t_enc] * int(x.shape[0])).to(shared.device), noise=noise)
self.init_latent = x
self.step = 0
samples = self.sampler.decode(x1, conditioning, t_enc, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning)
return samples
def sample(self, p, x, conditioning, unconditional_conditioning, steps=None):
self.initialize(p)
self.init_latent = None
self.step = 0
steps = steps or p.steps
# existing code fails with certain step counts, like 9
try:
samples_ddim, _ = self.sampler.sample(S=steps, conditioning=conditioning, batch_size=int(x.shape[0]), shape=x[0].shape, verbose=False, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning, x_T=x, eta=self.eta)
except Exception:
samples_ddim, _ = self.sampler.sample(S=steps+1, conditioning=conditioning, batch_size=int(x.shape[0]), shape=x[0].shape, verbose=False, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning, x_T=x, eta=self.eta)
return samples_ddim
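# CFGDenoiser wraps the k-diffusion denoiser and applies classifier-free guidance:
# denoised = uncond + (cond - uncond) * cond_scale; when an inpainting mask is set,
# the result is blended back into the original latent via mask/nmask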
class CFGDenoiser(torch.nn.Module):
def __init__(self, model):
super().__init__()
self.inner_model = model
self.mask = None
self.nmask = None
self.init_latent = None
self.step = 0
def forward(self, x, sigma, uncond, cond, cond_scale):
cond = prompt_parser.reconstruct_cond_batch(cond, self.step)
uncond = prompt_parser.reconstruct_cond_batch(uncond, self.step)
if shared.batch_cond_uncond:
x_in = torch.cat([x] * 2)
sigma_in = torch.cat([sigma] * 2)
cond_in = torch.cat([uncond, cond])
uncond, cond = self.inner_model(x_in, sigma_in, cond=cond_in).chunk(2)
denoised = uncond + (cond - uncond) * cond_scale
else:
uncond = self.inner_model(x, sigma, cond=uncond)
cond = self.inner_model(x, sigma, cond=cond)
denoised = uncond + (cond - uncond) * cond_scale
if self.mask is not None:
denoised = self.init_latent * self.mask + self.nmask * denoised
self.step += 1
return denoised
def extended_trange(sampler, count, *args, **kwargs):
state.sampling_steps = count
state.sampling_step = 0
for x in tqdm.trange(count, *args, desc=state.job, file=shared.progress_print_out, **kwargs):
if state.interrupted:
break
if sampler.stop_at is not None and x > sampler.stop_at:
break
yield x
state.sampling_step += 1
shared.total_tqdm.update()
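# TorchHijack is substituted for the torch module inside k_diffusion.sampling so that
# randn_like calls are answered from the sampler's pre-generated noise (used for variation
# seeds); every other attribute is forwarded to the real torch module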
class TorchHijack:
def __init__(self, kdiff_sampler):
self.kdiff_sampler = kdiff_sampler
def __getattr__(self, item):
if item == 'randn_like':
return self.kdiff_sampler.randn_like
if hasattr(torch, item):
return getattr(torch, item)
raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, item))
class KDiffusionSampler:
def __init__(self, funcname, sd_model):
self.model_wrap = k_diffusion.external.CompVisDenoiser(sd_model, quantize=shared.opts.enable_quantization)
self.funcname = funcname
self.func = getattr(k_diffusion.sampling, self.funcname)
self.extra_params = sampler_extra_params.get(funcname, [])
self.model_wrap_cfg = CFGDenoiser(self.model_wrap)
self.sampler_noises = None
self.sampler_noise_index = 0
self.stop_at = None
self.eta = None
self.default_eta = 1.0
def callback_state(self, d):
store_latent(d["denoised"])
def number_of_needed_noises(self, p):
return p.steps
def randn_like(self, x):
noise = self.sampler_noises[self.sampler_noise_index] if self.sampler_noises is not None and self.sampler_noise_index < len(self.sampler_noises) else None
if noise is not None and x.shape == noise.shape:
res = noise
else:
res = torch.randn_like(x)
self.sampler_noise_index += 1
return res
def initialize(self, p):
self.model_wrap_cfg.mask = p.mask if hasattr(p, 'mask') else None
self.model_wrap_cfg.nmask = p.nmask if hasattr(p, 'nmask') else None
self.model_wrap.step = 0
self.sampler_noise_index = 0
self.eta = p.eta or opts.eta_ancestral
if hasattr(k_diffusion.sampling, 'trange'):
k_diffusion.sampling.trange = lambda *args, **kwargs: extended_trange(self, *args, **kwargs)
if self.sampler_noises is not None:
k_diffusion.sampling.torch = TorchHijack(self)
extra_params_kwargs = {}
for param_name in self.extra_params:
if hasattr(p, param_name) and param_name in inspect.signature(self.func).parameters:
extra_params_kwargs[param_name] = getattr(p, param_name)
if 'eta' in inspect.signature(self.func).parameters:
extra_params_kwargs['eta'] = self.eta
return extra_params_kwargs
def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning, steps=None):
steps, t_enc = setup_img2img_steps(p, steps)
sigmas = self.model_wrap.get_sigmas(steps)
noise = noise * sigmas[steps - t_enc - 1]
xi = x + noise
extra_params_kwargs = self.initialize(p)
sigma_sched = sigmas[steps - t_enc - 1:]
self.model_wrap_cfg.init_latent = x
return self.func(self.model_wrap_cfg, xi, sigma_sched, extra_args={'cond': conditioning, 'uncond': unconditional_conditioning, 'cond_scale': p.cfg_scale}, disable=False, callback=self.callback_state, **extra_params_kwargs)
def sample(self, p, x, conditioning, unconditional_conditioning, steps=None):
steps = steps or p.steps
sigmas = self.model_wrap.get_sigmas(steps)
x = x * sigmas[0]
extra_params_kwargs = self.initialize(p)
if 'sigma_min' in inspect.signature(self.func).parameters:
extra_params_kwargs['sigma_min'] = self.model_wrap.sigmas[0].item()
extra_params_kwargs['sigma_max'] = self.model_wrap.sigmas[-1].item()
if 'n' in inspect.signature(self.func).parameters:
extra_params_kwargs['n'] = steps
else:
extra_params_kwargs['sigmas'] = sigmas
samples = self.func(self.model_wrap_cfg, x, extra_args={'cond': conditioning, 'uncond': unconditional_conditioning, 'cond_scale': p.cfg_scale}, disable=False, callback=self.callback_state, **extra_params_kwargs)
return samples
<|code_end|>
|
Script reload without restart
I write my own scripts, and they are getting quite complex. I have to change settings and values quite often, and when I do, I have to restart via webui-user to see the changes.
I am not an experienced Python coder, and using Gradio for this is too difficult for me.
Is there any way to enable or implement reloading scripts without having to restart? Maybe a kind soul could give me a hint on how to do it?
I think not many people write custom scripts, so the chances of this being implemented are probably small, but maybe there is already a right way to do it...
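One possible direction (a minimal sketch only; the helper name reload_custom_scripts is made up for illustration, and it assumes scripts are loaded through load_scripts() from a scripts directory):

```python
# hypothetical helper, not part of the existing code base
import modules.scripts as scripts

def reload_custom_scripts(basedir):
    scripts.scripts_data.clear()   # drop the previously compiled script classes
    scripts.load_scripts(basedir)  # compile every .py file in the directory again
    # caveat: the Gradio controls built by ScriptRunner.setup_ui() still reference the old
    # classes, so the web UI itself would have to be rebuilt for interface changes to show up
```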
|
modules/scripts.py
<|code_start|>import os
import sys
import traceback
import modules.ui as ui
import gradio as gr
from modules.processing import StableDiffusionProcessing
from modules import shared
class Script:
filename = None
args_from = None
args_to = None
# The title of the script. This is what will be displayed in the dropdown menu.
def title(self):
raise NotImplementedError()
# How the script is displayed in the UI. See https://gradio.app/docs/#components
# for the different UI components you can use and how to create them.
# Most UI components can return a value, such as a boolean for a checkbox.
# The returned values are passed to the run method as parameters.
def ui(self, is_img2img):
pass
# Determines when the script should be shown in the dropdown menu via the
# returned value. As an example:
# is_img2img is True if the current tab is img2img, and False if it is txt2img.
# Thus, return is_img2img to only show the script on the img2img tab.
def show(self, is_img2img):
return True
# This is where the additional processing is implemented. The parameters include
# self, the model object "p" (a StableDiffusionProcessing class, see
# processing.py), and the parameters returned by the ui method.
# Custom functions can be defined here, and additional libraries can be imported
# to be used in processing. The return value should be a Processed object, which is
# what is returned by the process_images method.
def run(self, *args):
raise NotImplementedError()
# The description method is currently unused.
# To add a description that appears when hovering over the title, amend the "titles"
# dict in script.js to include the script title (returned by title) as a key, and
# your description as the value.
def describe(self):
return ""
scripts_data = []
def load_scripts(basedir):
if not os.path.exists(basedir):
return
for filename in sorted(os.listdir(basedir)):
path = os.path.join(basedir, filename)
if not os.path.isfile(path):
continue
try:
with open(path, "r", encoding="utf8") as file:
text = file.read()
from types import ModuleType
compiled = compile(text, path, 'exec')
module = ModuleType(filename)
exec(compiled, module.__dict__)
for key, script_class in module.__dict__.items():
if type(script_class) == type and issubclass(script_class, Script):
scripts_data.append((script_class, path))
except Exception:
print(f"Error loading script: {filename}", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
def wrap_call(func, filename, funcname, *args, default=None, **kwargs):
try:
res = func(*args, **kwargs)
return res
except Exception:
print(f"Error calling: {filename}/{funcname}", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
return default
class ScriptRunner:
def __init__(self):
self.scripts = []
def setup_ui(self, is_img2img):
for script_class, path in scripts_data:
script = script_class()
script.filename = path
if not script.show(is_img2img):
continue
self.scripts.append(script)
titles = [wrap_call(script.title, script.filename, "title") or f"{script.filename} [error]" for script in self.scripts]
dropdown = gr.Dropdown(label="Script", choices=["None"] + titles, value="None", type="index")
inputs = [dropdown]
for script in self.scripts:
script.args_from = len(inputs)
script.args_to = len(inputs)
controls = wrap_call(script.ui, script.filename, "ui", is_img2img)
if controls is None:
continue
for control in controls:
control.custom_script_source = os.path.basename(script.filename)
control.visible = False
inputs += controls
script.args_to = len(inputs)
def select_script(script_index):
if 0 < script_index <= len(self.scripts):
script = self.scripts[script_index-1]
args_from = script.args_from
args_to = script.args_to
else:
args_from = 0
args_to = 0
return [ui.gr_show(True if i == 0 else args_from <= i < args_to) for i in range(len(inputs))]
dropdown.change(
fn=select_script,
inputs=[dropdown],
outputs=inputs
)
return inputs
def run(self, p: StableDiffusionProcessing, *args):
script_index = args[0]
if script_index == 0:
return None
script = self.scripts[script_index-1]
if script is None:
return None
script_args = args[script.args_from:script.args_to]
processed = script.run(p, *script_args)
shared.total_tqdm.clear()
return processed
scripts_txt2img = ScriptRunner()
scripts_img2img = ScriptRunner()
<|code_end|>
modules/ui.py
<|code_start|>import base64
import html
import io
import json
import math
import mimetypes
import os
import random
import sys
import time
import traceback
import platform
import subprocess as sp
from functools import reduce
import numpy as np
import torch
from PIL import Image, PngImagePlugin
import piexif
import piexif.helper
import gradio as gr
import gradio.utils
import gradio.routes
from modules import sd_hijack
from modules.paths import script_path
from modules.shared import opts, cmd_opts
import modules.shared as shared
from modules.sd_samplers import samplers, samplers_for_img2img
from modules.sd_hijack import model_hijack
import modules.ldsr_model
import modules.scripts
import modules.gfpgan_model
import modules.codeformer_model
import modules.styles
import modules.generation_parameters_copypaste
from modules.prompt_parser import get_learned_conditioning_prompt_schedules
from modules.images import apply_filename_pattern, get_next_sequence_number
import modules.textual_inversion.ui
# this is a fix for Windows users. Without it, JavaScript files will be served with a text/html content-type and the browser will not show any UI
mimetypes.init()
mimetypes.add_type('application/javascript', '.js')
if not cmd_opts.share and not cmd_opts.listen:
# fix gradio phoning home
gradio.utils.version_check = lambda: None
gradio.utils.get_local_ip_address = lambda: '127.0.0.1'
def gr_show(visible=True):
return {"visible": visible, "__type__": "update"}
sample_img2img = "assets/stable-samples/img2img/sketch-mountains-input.jpg"
sample_img2img = sample_img2img if os.path.exists(sample_img2img) else None
css_hide_progressbar = """
.wrap .m-12 svg { display:none!important; }
.wrap .m-12::before { content:"Loading..." }
.progress-bar { display:none!important; }
.meta-text { display:none!important; }
"""
# Using constants for these since the variation selector isn't visible.
# Important that they exactly match script.js for tooltip to work.
random_symbol = '\U0001f3b2\ufe0f' # 🎲️
reuse_symbol = '\u267b\ufe0f' # ♻️
art_symbol = '\U0001f3a8' # 🎨
paste_symbol = '\u2199\ufe0f' # ↙
folder_symbol = '\U0001f4c2'  # 📂
def plaintext_to_html(text):
text = "<p>" + "<br>\n".join([f"{html.escape(x)}" for x in text.split('\n')]) + "</p>"
return text
def image_from_url_text(filedata):
if type(filedata) == list:
if len(filedata) == 0:
return None
filedata = filedata[0]
if filedata.startswith("data:image/png;base64,"):
filedata = filedata[len("data:image/png;base64,"):]
filedata = base64.decodebytes(filedata.encode('utf-8'))
image = Image.open(io.BytesIO(filedata))
return image
def send_gradio_gallery_to_image(x):
if len(x) == 0:
return None
return image_from_url_text(x[0])
def save_files(js_data, images, index):
import csv
filenames = []
# quick dictionary-to-class-object conversion; it's necessary because apply_filename_pattern requires an object
class MyObject:
def __init__(self, d=None):
if d is not None:
for key, value in d.items():
setattr(self, key, value)
data = json.loads(js_data)
p = MyObject(data)
path = opts.outdir_save
save_to_dirs = opts.save_to_dirs
if save_to_dirs:
dirname = apply_filename_pattern(opts.directories_filename_pattern or "[prompt_words]", p, p.seed, p.prompt)
path = os.path.join(opts.outdir_save, dirname)
os.makedirs(path, exist_ok=True)
if index > -1 and opts.save_selected_only and (index >= data["index_of_first_image"]): # ensures we are looking at a specific non-grid picture, and we have save_selected_only
images = [images[index]]
infotexts = [data["infotexts"][index]]
else:
infotexts = data["infotexts"]
with open(os.path.join(opts.outdir_save, "log.csv"), "a", encoding="utf8", newline='') as file:
at_start = file.tell() == 0
writer = csv.writer(file)
if at_start:
writer.writerow(["prompt", "seed", "width", "height", "sampler", "cfgs", "steps", "filename", "negative_prompt"])
file_decoration = opts.samples_filename_pattern or "[seed]-[prompt_spaces]"
if file_decoration != "":
file_decoration = "-" + file_decoration.lower()
file_decoration = apply_filename_pattern(file_decoration, p, p.seed, p.prompt)
truncated = (file_decoration[:240] + '..') if len(file_decoration) > 240 else file_decoration
filename_base = truncated
extension = opts.samples_format.lower()
basecount = get_next_sequence_number(path, "")
for i, filedata in enumerate(images):
file_number = f"{basecount+i:05}"
filename = file_number + filename_base + f".{extension}"
filepath = os.path.join(path, filename)
if filedata.startswith("data:image/png;base64,"):
filedata = filedata[len("data:image/png;base64,"):]
image = Image.open(io.BytesIO(base64.decodebytes(filedata.encode('utf-8'))))
if opts.enable_pnginfo and extension == 'png':
pnginfo = PngImagePlugin.PngInfo()
pnginfo.add_text('parameters', infotexts[i])
image.save(filepath, pnginfo=pnginfo)
else:
image.save(filepath, quality=opts.jpeg_quality)
if opts.enable_pnginfo and extension in ("jpg", "jpeg", "webp"):
piexif.insert(piexif.dump({"Exif": {
piexif.ExifIFD.UserComment: piexif.helper.UserComment.dump(infotexts[i], encoding="unicode")
}}), filepath)
filenames.append(filename)
writer.writerow([data["prompt"], data["seed"], data["width"], data["height"], data["sampler"], data["cfg_scale"], data["steps"], filenames[0], data["negative_prompt"]])
return '', '', plaintext_to_html(f"Saved: {filenames[0]}")
def wrap_gradio_call(func, extra_outputs=None):
def f(*args, extra_outputs_array=extra_outputs, **kwargs):
run_memmon = opts.memmon_poll_rate > 0 and not shared.mem_mon.disabled
if run_memmon:
shared.mem_mon.monitor()
t = time.perf_counter()
try:
res = list(func(*args, **kwargs))
except Exception as e:
print("Error completing request", file=sys.stderr)
print("Arguments:", args, kwargs, file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
shared.state.job = ""
shared.state.job_count = 0
if extra_outputs_array is None:
extra_outputs_array = [None, '']
res = extra_outputs_array + [f"<div class='error'>{plaintext_to_html(type(e).__name__+': '+str(e))}</div>"]
elapsed = time.perf_counter() - t
if run_memmon:
mem_stats = {k: -(v//-(1024*1024)) for k, v in shared.mem_mon.stop().items()}
active_peak = mem_stats['active_peak']
reserved_peak = mem_stats['reserved_peak']
sys_peak = mem_stats['system_peak']
sys_total = mem_stats['total']
sys_pct = round(sys_peak/max(sys_total, 1) * 100, 2)
vram_html = f"<p class='vram'>Torch active/reserved: {active_peak}/{reserved_peak} MiB, <wbr>Sys VRAM: {sys_peak}/{sys_total} MiB ({sys_pct}%)</p>"
else:
vram_html = ''
# last item is always HTML
res[-1] += f"<div class='performance'><p class='time'>Time taken: <wbr>{elapsed:.2f}s</p>{vram_html}</div>"
shared.state.interrupted = False
shared.state.job_count = 0
return tuple(res)
return f
def check_progress_call(id_part):
if shared.state.job_count == 0:
return "", gr_show(False), gr_show(False), gr_show(False)
progress = 0
if shared.state.job_count > 0:
progress += shared.state.job_no / shared.state.job_count
if shared.state.sampling_steps > 0:
progress += 1 / shared.state.job_count * shared.state.sampling_step / shared.state.sampling_steps
progress = min(progress, 1)
progressbar = ""
if opts.show_progressbar:
progressbar = f"""<div class='progressDiv'><div class='progress' style="width:{progress * 100}%">{str(int(progress*100))+"%" if progress > 0.01 else ""}</div></div>"""
image = gr_show(False)
preview_visibility = gr_show(False)
if opts.show_progress_every_n_steps > 0:
if shared.parallel_processing_allowed:
if shared.state.sampling_step - shared.state.current_image_sampling_step >= opts.show_progress_every_n_steps and shared.state.current_latent is not None:
shared.state.current_image = modules.sd_samplers.sample_to_image(shared.state.current_latent)
shared.state.current_image_sampling_step = shared.state.sampling_step
image = shared.state.current_image
if image is None:
image = gr.update(value=None)
else:
preview_visibility = gr_show(True)
if shared.state.textinfo is not None:
textinfo_result = gr.HTML.update(value=shared.state.textinfo, visible=True)
else:
textinfo_result = gr_show(False)
return f"<span id='{id_part}_progress_span' style='display: none'>{time.time()}</span><p>{progressbar}</p>", preview_visibility, image, textinfo_result
def check_progress_call_initial(id_part):
shared.state.job_count = -1
shared.state.current_latent = None
shared.state.current_image = None
shared.state.textinfo = None
return check_progress_call(id_part)
def roll_artist(prompt):
allowed_cats = set([x for x in shared.artist_db.categories() if len(opts.random_artist_categories)==0 or x in opts.random_artist_categories])
artist = random.choice([x for x in shared.artist_db.artists if x.category in allowed_cats])
return prompt + ", " + artist.name if prompt != '' else artist.name
def visit(x, func, path=""):
if hasattr(x, 'children'):
for c in x.children:
visit(c, func, path)
elif x.label is not None:
func(path + "/" + str(x.label), x)
def add_style(name: str, prompt: str, negative_prompt: str):
if name is None:
return [gr_show(), gr_show()]
style = modules.styles.PromptStyle(name, prompt, negative_prompt)
shared.prompt_styles.styles[style.name] = style
# Save all loaded prompt styles: this allows us to update the storage format in the future more easily, because we
# reserialize all styles every time we save them
shared.prompt_styles.save_styles(shared.styles_filename)
return [gr.Dropdown.update(visible=True, choices=list(shared.prompt_styles.styles)) for _ in range(4)]
def apply_styles(prompt, prompt_neg, style1_name, style2_name):
prompt = shared.prompt_styles.apply_styles_to_prompt(prompt, [style1_name, style2_name])
prompt_neg = shared.prompt_styles.apply_negative_styles_to_prompt(prompt_neg, [style1_name, style2_name])
return [gr.Textbox.update(value=prompt), gr.Textbox.update(value=prompt_neg), gr.Dropdown.update(value="None"), gr.Dropdown.update(value="None")]
def interrogate(image):
prompt = shared.interrogator.interrogate(image)
return gr_show(True) if prompt is None else prompt
def create_seed_inputs():
with gr.Row():
with gr.Box():
with gr.Row(elem_id='seed_row'):
seed = (gr.Textbox if cmd_opts.use_textbox_seed else gr.Number)(label='Seed', value=-1)
seed.style(container=False)
random_seed = gr.Button(random_symbol, elem_id='random_seed')
reuse_seed = gr.Button(reuse_symbol, elem_id='reuse_seed')
with gr.Box(elem_id='subseed_show_box'):
seed_checkbox = gr.Checkbox(label='Extra', elem_id='subseed_show', value=False)
# Components to show/hide based on the 'Extra' checkbox
seed_extras = []
with gr.Row(visible=False) as seed_extra_row_1:
seed_extras.append(seed_extra_row_1)
with gr.Box():
with gr.Row(elem_id='subseed_row'):
subseed = gr.Number(label='Variation seed', value=-1)
subseed.style(container=False)
random_subseed = gr.Button(random_symbol, elem_id='random_subseed')
reuse_subseed = gr.Button(reuse_symbol, elem_id='reuse_subseed')
subseed_strength = gr.Slider(label='Variation strength', value=0.0, minimum=0, maximum=1, step=0.01)
with gr.Row(visible=False) as seed_extra_row_2:
seed_extras.append(seed_extra_row_2)
seed_resize_from_w = gr.Slider(minimum=0, maximum=2048, step=64, label="Resize seed from width", value=0)
seed_resize_from_h = gr.Slider(minimum=0, maximum=2048, step=64, label="Resize seed from height", value=0)
random_seed.click(fn=lambda: -1, show_progress=False, inputs=[], outputs=[seed])
random_subseed.click(fn=lambda: -1, show_progress=False, inputs=[], outputs=[subseed])
def change_visibility(show):
return {comp: gr_show(show) for comp in seed_extras}
seed_checkbox.change(change_visibility, show_progress=False, inputs=[seed_checkbox], outputs=seed_extras)
return seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox
def connect_reuse_seed(seed: gr.Number, reuse_seed: gr.Button, generation_info: gr.Textbox, dummy_component, is_subseed):
""" Connects a 'reuse (sub)seed' button's click event so that it copies last used
(sub)seed value from generation info to the seed field. If copying subseed and subseed strength
was 0, i.e. no variation seed was used, it copies the normal seed value instead."""
def copy_seed(gen_info_string: str, index):
res = -1
try:
gen_info = json.loads(gen_info_string)
index -= gen_info.get('index_of_first_image', 0)
if is_subseed and gen_info.get('subseed_strength', 0) > 0:
all_subseeds = gen_info.get('all_subseeds', [-1])
res = all_subseeds[index if 0 <= index < len(all_subseeds) else 0]
else:
all_seeds = gen_info.get('all_seeds', [-1])
res = all_seeds[index if 0 <= index < len(all_seeds) else 0]
except json.decoder.JSONDecodeError as e:
if gen_info_string != '':
print("Error parsing JSON generation info:", file=sys.stderr)
print(gen_info_string, file=sys.stderr)
return [res, gr_show(False)]
reuse_seed.click(
fn=copy_seed,
_js="(x, y) => [x, selected_gallery_index()]",
show_progress=False,
inputs=[generation_info, dummy_component],
outputs=[seed, dummy_component]
)
def update_token_counter(text, steps):
prompt_schedules = get_learned_conditioning_prompt_schedules([text], steps)
flat_prompts = reduce(lambda list1, list2: list1+list2, prompt_schedules)
prompts = [prompt_text for step,prompt_text in flat_prompts]
tokens, token_count, max_length = max([model_hijack.tokenize(prompt) for prompt in prompts], key=lambda args: args[1])
style_class = ' class="red"' if (token_count > max_length) else ""
return f"<span {style_class}>{token_count}/{max_length}</span>"
def create_toprow(is_img2img):
id_part = "img2img" if is_img2img else "txt2img"
with gr.Row(elem_id="toprow"):
with gr.Column(scale=4):
with gr.Row():
with gr.Column(scale=80):
with gr.Row():
prompt = gr.Textbox(label="Prompt", elem_id=f"{id_part}_prompt", show_label=False, placeholder="Prompt", lines=2)
with gr.Column(scale=1, elem_id="roll_col"):
roll = gr.Button(value=art_symbol, elem_id="roll", visible=len(shared.artist_db.artists) > 0)
paste = gr.Button(value=paste_symbol, elem_id="paste")
token_counter = gr.HTML(value="<span></span>", elem_id=f"{id_part}_token_counter")
token_button = gr.Button(visible=False, elem_id=f"{id_part}_token_button")
with gr.Column(scale=10, elem_id="style_pos_col"):
prompt_style = gr.Dropdown(label="Style 1", elem_id=f"{id_part}_style_index", choices=[k for k, v in shared.prompt_styles.styles.items()], value=next(iter(shared.prompt_styles.styles.keys())), visible=len(shared.prompt_styles.styles) > 1)
with gr.Row():
with gr.Column(scale=8):
negative_prompt = gr.Textbox(label="Negative prompt", elem_id="negative_prompt", show_label=False, placeholder="Negative prompt", lines=2)
with gr.Column(scale=1, elem_id="style_neg_col"):
prompt_style2 = gr.Dropdown(label="Style 2", elem_id=f"{id_part}_style2_index", choices=[k for k, v in shared.prompt_styles.styles.items()], value=next(iter(shared.prompt_styles.styles.keys())), visible=len(shared.prompt_styles.styles) > 1)
with gr.Column(scale=1):
with gr.Row():
interrupt = gr.Button('Interrupt', elem_id=f"{id_part}_interrupt")
submit = gr.Button('Generate', elem_id=f"{id_part}_generate", variant='primary')
interrupt.click(
fn=lambda: shared.state.interrupt(),
inputs=[],
outputs=[],
)
with gr.Row():
if is_img2img:
interrogate = gr.Button('Interrogate', elem_id="interrogate")
else:
interrogate = None
prompt_style_apply = gr.Button('Apply style', elem_id="style_apply")
save_style = gr.Button('Create style', elem_id="style_create")
return prompt, roll, prompt_style, negative_prompt, prompt_style2, submit, interrogate, prompt_style_apply, save_style, paste, token_counter, token_button
def setup_progressbar(progressbar, preview, id_part, textinfo=None):
if textinfo is None:
textinfo = gr.HTML(visible=False)
check_progress = gr.Button('Check progress', elem_id=f"{id_part}_check_progress", visible=False)
check_progress.click(
fn=lambda: check_progress_call(id_part),
show_progress=False,
inputs=[],
outputs=[progressbar, preview, preview, textinfo],
)
check_progress_initial = gr.Button('Check progress (first)', elem_id=f"{id_part}_check_progress_initial", visible=False)
check_progress_initial.click(
fn=lambda: check_progress_call_initial(id_part),
show_progress=False,
inputs=[],
outputs=[progressbar, preview, preview, textinfo],
)
def create_ui(wrap_gradio_gpu_call):
import modules.img2img
import modules.txt2img
with gr.Blocks(analytics_enabled=False) as txt2img_interface:
txt2img_prompt, roll, txt2img_prompt_style, txt2img_negative_prompt, txt2img_prompt_style2, submit, _, txt2img_prompt_style_apply, txt2img_save_style, paste, token_counter, token_button = create_toprow(is_img2img=False)
dummy_component = gr.Label(visible=False)
with gr.Row(elem_id='txt2img_progress_row'):
with gr.Column(scale=1):
pass
with gr.Column(scale=1):
progressbar = gr.HTML(elem_id="txt2img_progressbar")
txt2img_preview = gr.Image(elem_id='txt2img_preview', visible=False)
setup_progressbar(progressbar, txt2img_preview, 'txt2img')
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
steps = gr.Slider(minimum=1, maximum=150, step=1, label="Sampling Steps", value=20)
sampler_index = gr.Radio(label='Sampling method', elem_id="txt2img_sampling", choices=[x.name for x in samplers], value=samplers[0].name, type="index")
with gr.Group():
width = gr.Slider(minimum=64, maximum=2048, step=64, label="Width", value=512)
height = gr.Slider(minimum=64, maximum=2048, step=64, label="Height", value=512)
with gr.Row():
restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1)
tiling = gr.Checkbox(label='Tiling', value=False)
enable_hr = gr.Checkbox(label='Highres. fix', value=False)
with gr.Row(visible=False) as hr_options:
scale_latent = gr.Checkbox(label='Scale latent', value=False)
denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7)
with gr.Row():
batch_count = gr.Slider(minimum=1, maximum=cmd_opts.max_batch_count, step=1, label='Batch count', value=1)
batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1)
cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0)
seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs()
with gr.Group():
custom_inputs = modules.scripts.scripts_txt2img.setup_ui(is_img2img=False)
with gr.Column(variant='panel'):
with gr.Group():
txt2img_preview = gr.Image(elem_id='txt2img_preview', visible=False)
txt2img_gallery = gr.Gallery(label='Output', show_label=False, elem_id='txt2img_gallery').style(grid=4)
with gr.Group():
with gr.Row():
save = gr.Button('Save')
send_to_img2img = gr.Button('Send to img2img')
send_to_inpaint = gr.Button('Send to inpaint')
send_to_extras = gr.Button('Send to extras')
button_id = "hidden_element" if shared.cmd_opts.hide_ui_dir_config else 'open_folder'
open_txt2img_folder = gr.Button(folder_symbol, elem_id=button_id)
with gr.Group():
html_info = gr.HTML()
generation_info = gr.Textbox(visible=False)
connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False)
connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True)
txt2img_args = dict(
fn=wrap_gradio_gpu_call(modules.txt2img.txt2img),
_js="submit",
inputs=[
txt2img_prompt,
txt2img_negative_prompt,
txt2img_prompt_style,
txt2img_prompt_style2,
steps,
sampler_index,
restore_faces,
tiling,
batch_count,
batch_size,
cfg_scale,
seed,
subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox,
height,
width,
enable_hr,
scale_latent,
denoising_strength,
] + custom_inputs,
outputs=[
txt2img_gallery,
generation_info,
html_info
],
show_progress=False,
)
txt2img_prompt.submit(**txt2img_args)
submit.click(**txt2img_args)
enable_hr.change(
fn=lambda x: gr_show(x),
inputs=[enable_hr],
outputs=[hr_options],
)
save.click(
fn=wrap_gradio_call(save_files),
_js="(x, y, z) => [x, y, selected_gallery_index()]",
inputs=[
generation_info,
txt2img_gallery,
html_info,
],
outputs=[
html_info,
html_info,
html_info,
]
)
roll.click(
fn=roll_artist,
_js="update_txt2img_tokens",
inputs=[
txt2img_prompt,
],
outputs=[
txt2img_prompt,
]
)
txt2img_paste_fields = [
(txt2img_prompt, "Prompt"),
(txt2img_negative_prompt, "Negative prompt"),
(steps, "Steps"),
(sampler_index, "Sampler"),
(restore_faces, "Face restoration"),
(cfg_scale, "CFG scale"),
(seed, "Seed"),
(width, "Size-1"),
(height, "Size-2"),
(batch_size, "Batch size"),
(subseed, "Variation seed"),
(subseed_strength, "Variation seed strength"),
(seed_resize_from_w, "Seed resize from-1"),
(seed_resize_from_h, "Seed resize from-2"),
(denoising_strength, "Denoising strength"),
(enable_hr, lambda d: "Denoising strength" in d),
(hr_options, lambda d: gr.Row.update(visible="Denoising strength" in d)),
]
modules.generation_parameters_copypaste.connect_paste(paste, txt2img_paste_fields, txt2img_prompt)
token_button.click(fn=update_token_counter, inputs=[txt2img_prompt, steps], outputs=[token_counter])
with gr.Blocks(analytics_enabled=False) as img2img_interface:
img2img_prompt, roll, img2img_prompt_style, img2img_negative_prompt, img2img_prompt_style2, submit, img2img_interrogate, img2img_prompt_style_apply, img2img_save_style, paste, token_counter, token_button = create_toprow(is_img2img=True)
with gr.Row(elem_id='img2img_progress_row'):
with gr.Column(scale=1):
pass
with gr.Column(scale=1):
progressbar = gr.HTML(elem_id="img2img_progressbar")
img2img_preview = gr.Image(elem_id='img2img_preview', visible=False)
setup_progressbar(progressbar, img2img_preview, 'img2img')
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
with gr.Tabs(elem_id="mode_img2img") as tabs_img2img_mode:
with gr.TabItem('img2img', id='img2img'):
init_img = gr.Image(label="Image for img2img", show_label=False, source="upload", interactive=True, type="pil")
with gr.TabItem('Inpaint', id='inpaint'):
init_img_with_mask = gr.Image(label="Image for inpainting with mask", show_label=False, elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool="sketch", image_mode="RGBA")
init_img_inpaint = gr.Image(label="Image for img2img", show_label=False, source="upload", interactive=True, type="pil", visible=False, elem_id="img_inpaint_base")
init_mask_inpaint = gr.Image(label="Mask", source="upload", interactive=True, type="pil", visible=False, elem_id="img_inpaint_mask")
mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4)
with gr.Row():
mask_mode = gr.Radio(label="Mask mode", show_label=False, choices=["Draw mask", "Upload mask"], type="index", value="Draw mask", elem_id="mask_mode")
inpainting_mask_invert = gr.Radio(label='Masking mode', show_label=False, choices=['Inpaint masked', 'Inpaint not masked'], value='Inpaint masked', type="index")
inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='original', type="index")
with gr.Row():
inpaint_full_res = gr.Checkbox(label='Inpaint at full resolution', value=False)
inpaint_full_res_padding = gr.Slider(label='Inpaint at full resolution padding, pixels', minimum=0, maximum=256, step=4, value=32)
with gr.TabItem('Batch img2img', id='batch'):
hidden = '<br>Disabled when launched with --hide-ui-dir-config.' if shared.cmd_opts.hide_ui_dir_config else ''
gr.HTML(f"<p class=\"text-gray-500\">Process images in a directory on the same machine where the server is running.{hidden}</p>")
img2img_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs)
img2img_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs)
with gr.Row():
resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", show_label=False, choices=["Just resize", "Crop and resize", "Resize and fill"], type="index", value="Just resize")
steps = gr.Slider(minimum=1, maximum=150, step=1, label="Sampling Steps", value=20)
sampler_index = gr.Radio(label='Sampling method', choices=[x.name for x in samplers_for_img2img], value=samplers_for_img2img[0].name, type="index")
with gr.Group():
width = gr.Slider(minimum=64, maximum=2048, step=64, label="Width", value=512)
height = gr.Slider(minimum=64, maximum=2048, step=64, label="Height", value=512)
with gr.Row():
restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1)
tiling = gr.Checkbox(label='Tiling', value=False)
with gr.Row():
batch_count = gr.Slider(minimum=1, maximum=cmd_opts.max_batch_count, step=1, label='Batch count', value=1)
batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1)
with gr.Group():
cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0)
denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75)
seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs()
with gr.Group():
custom_inputs = modules.scripts.scripts_img2img.setup_ui(is_img2img=True)
with gr.Column(variant='panel'):
with gr.Group():
img2img_preview = gr.Image(elem_id='img2img_preview', visible=False)
img2img_gallery = gr.Gallery(label='Output', show_label=False, elem_id='img2img_gallery').style(grid=4)
with gr.Group():
with gr.Row():
save = gr.Button('Save')
img2img_send_to_img2img = gr.Button('Send to img2img')
img2img_send_to_inpaint = gr.Button('Send to inpaint')
img2img_send_to_extras = gr.Button('Send to extras')
button_id = "hidden_element" if shared.cmd_opts.hide_ui_dir_config else 'open_folder'
open_img2img_folder = gr.Button(folder_symbol, elem_id=button_id)
with gr.Group():
html_info = gr.HTML()
generation_info = gr.Textbox(visible=False)
connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False)
connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True)
mask_mode.change(
lambda mode, img: {
init_img_with_mask: gr_show(mode == 0),
init_img_inpaint: gr_show(mode == 1),
init_mask_inpaint: gr_show(mode == 1),
},
inputs=[mask_mode, init_img_with_mask],
outputs=[
init_img_with_mask,
init_img_inpaint,
init_mask_inpaint,
],
)
img2img_args = dict(
fn=wrap_gradio_gpu_call(modules.img2img.img2img),
_js="submit_img2img",
inputs=[
dummy_component,
img2img_prompt,
img2img_negative_prompt,
img2img_prompt_style,
img2img_prompt_style2,
init_img,
init_img_with_mask,
init_img_inpaint,
init_mask_inpaint,
mask_mode,
steps,
sampler_index,
mask_blur,
inpainting_fill,
restore_faces,
tiling,
batch_count,
batch_size,
cfg_scale,
denoising_strength,
seed,
subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox,
height,
width,
resize_mode,
inpaint_full_res,
inpaint_full_res_padding,
inpainting_mask_invert,
img2img_batch_input_dir,
img2img_batch_output_dir,
] + custom_inputs,
outputs=[
img2img_gallery,
generation_info,
html_info
],
show_progress=False,
)
img2img_prompt.submit(**img2img_args)
submit.click(**img2img_args)
img2img_interrogate.click(
fn=interrogate,
inputs=[init_img],
outputs=[img2img_prompt],
)
save.click(
fn=wrap_gradio_call(save_files),
_js="(x, y, z) => [x, y, selected_gallery_index()]",
inputs=[
generation_info,
img2img_gallery,
html_info
],
outputs=[
html_info,
html_info,
html_info,
]
)
roll.click(
fn=roll_artist,
_js="update_img2img_tokens",
inputs=[
img2img_prompt,
],
outputs=[
img2img_prompt,
]
)
prompts = [(txt2img_prompt, txt2img_negative_prompt), (img2img_prompt, img2img_negative_prompt)]
style_dropdowns = [(txt2img_prompt_style, txt2img_prompt_style2), (img2img_prompt_style, img2img_prompt_style2)]
style_js_funcs = ["update_txt2img_tokens", "update_img2img_tokens"]
for button, (prompt, negative_prompt) in zip([txt2img_save_style, img2img_save_style], prompts):
button.click(
fn=add_style,
_js="ask_for_style_name",
# Have to pass empty dummy component here, because the JavaScript and Python function have to accept
# the same number of parameters, but we only know the style-name after the JavaScript prompt
inputs=[dummy_component, prompt, negative_prompt],
outputs=[txt2img_prompt_style, img2img_prompt_style, txt2img_prompt_style2, img2img_prompt_style2],
)
for button, (prompt, negative_prompt), (style1, style2), js_func in zip([txt2img_prompt_style_apply, img2img_prompt_style_apply], prompts, style_dropdowns, style_js_funcs):
button.click(
fn=apply_styles,
_js=js_func,
inputs=[prompt, negative_prompt, style1, style2],
outputs=[prompt, negative_prompt, style1, style2],
)
img2img_paste_fields = [
(img2img_prompt, "Prompt"),
(img2img_negative_prompt, "Negative prompt"),
(steps, "Steps"),
(sampler_index, "Sampler"),
(restore_faces, "Face restoration"),
(cfg_scale, "CFG scale"),
(seed, "Seed"),
(width, "Size-1"),
(height, "Size-2"),
(batch_size, "Batch size"),
(subseed, "Variation seed"),
(subseed_strength, "Variation seed strength"),
(seed_resize_from_w, "Seed resize from-1"),
(seed_resize_from_h, "Seed resize from-2"),
(denoising_strength, "Denoising strength"),
]
modules.generation_parameters_copypaste.connect_paste(paste, img2img_paste_fields, img2img_prompt)
token_button.click(fn=update_token_counter, inputs=[img2img_prompt, steps], outputs=[token_counter])
with gr.Blocks(analytics_enabled=False) as extras_interface:
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
with gr.Tabs(elem_id="mode_extras"):
with gr.TabItem('Single Image'):
extras_image = gr.Image(label="Source", source="upload", interactive=True, type="pil")
with gr.TabItem('Batch Process'):
image_batch = gr.File(label="Batch Process", file_count="multiple", interactive=True, type="file")
upscaling_resize = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Resize", value=2)
with gr.Group():
extras_upscaler_1 = gr.Radio(label='Upscaler 1', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index")
with gr.Group():
extras_upscaler_2 = gr.Radio(label='Upscaler 2', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index")
extras_upscaler_2_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="Upscaler 2 visibility", value=1)
with gr.Group():
gfpgan_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="GFPGAN visibility", value=0, interactive=modules.gfpgan_model.have_gfpgan)
with gr.Group():
codeformer_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="CodeFormer visibility", value=0, interactive=modules.codeformer_model.have_codeformer)
codeformer_weight = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="CodeFormer weight (0 = maximum effect, 1 = minimum effect)", value=0, interactive=modules.codeformer_model.have_codeformer)
submit = gr.Button('Generate', elem_id="extras_generate", variant='primary')
with gr.Column(variant='panel'):
result_images = gr.Gallery(label="Result", show_label=False)
html_info_x = gr.HTML()
html_info = gr.HTML()
extras_send_to_img2img = gr.Button('Send to img2img')
extras_send_to_inpaint = gr.Button('Send to inpaint')
button_id = "hidden_element" if shared.cmd_opts.hide_ui_dir_config else ''
open_extras_folder = gr.Button('Open output directory', elem_id=button_id)
submit.click(
fn=wrap_gradio_gpu_call(modules.extras.run_extras),
_js="get_extras_tab_index",
inputs=[
dummy_component,
extras_image,
image_batch,
gfpgan_visibility,
codeformer_visibility,
codeformer_weight,
upscaling_resize,
extras_upscaler_1,
extras_upscaler_2,
extras_upscaler_2_visibility,
],
outputs=[
result_images,
html_info_x,
html_info,
]
)
extras_send_to_img2img.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_img2img",
inputs=[result_images],
outputs=[init_img],
)
extras_send_to_inpaint.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_img2img",
inputs=[result_images],
outputs=[init_img_with_mask],
)
with gr.Blocks(analytics_enabled=False) as pnginfo_interface:
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
image = gr.Image(elem_id="pnginfo_image", label="Source", source="upload", interactive=True, type="pil")
with gr.Column(variant='panel'):
html = gr.HTML()
generation_info = gr.Textbox(visible=False)
html2 = gr.HTML()
with gr.Row():
pnginfo_send_to_txt2img = gr.Button('Send to txt2img')
pnginfo_send_to_img2img = gr.Button('Send to img2img')
image.change(
fn=wrap_gradio_call(modules.extras.run_pnginfo),
inputs=[image],
outputs=[html, generation_info, html2],
)
with gr.Blocks() as modelmerger_interface:
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
gr.HTML(value="<p>A merger of the two checkpoints will be generated in your <b>checkpoint</b> directory.</p>")
with gr.Row():
primary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_primary_model_name", label="Primary Model Name")
secondary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_secondary_model_name", label="Secondary Model Name")
custom_name = gr.Textbox(label="Custom Name (Optional)")
interp_amount = gr.Slider(minimum=0.0, maximum=1.0, step=0.05, label='Interpolation Amount', value=0.3)
interp_method = gr.Radio(choices=["Weighted Sum", "Sigmoid", "Inverse Sigmoid"], value="Weighted Sum", label="Interpolation Method")
save_as_half = gr.Checkbox(value=False, label="Save as float16")
modelmerger_merge = gr.Button(elem_id="modelmerger_merge", label="Merge", variant='primary')
with gr.Column(variant='panel'):
submit_result = gr.Textbox(elem_id="modelmerger_result", show_label=False)
sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings()
with gr.Blocks() as textual_inversion_interface:
with gr.Row().style(equal_height=False):
with gr.Column():
with gr.Group():
gr.HTML(value="<p style='margin-bottom: 0.7em'>Create a new embedding</p>")
new_embedding_name = gr.Textbox(label="Name")
initialization_text = gr.Textbox(label="Initialization text", value="*")
nvpt = gr.Slider(label="Number of vectors per token", minimum=1, maximum=75, step=1, value=1)
with gr.Row():
with gr.Column(scale=3):
gr.HTML(value="")
with gr.Column():
create_embedding = gr.Button(value="Create", variant='primary')
with gr.Group():
gr.HTML(value="<p style='margin-bottom: 0.7em'>Train an embedding; must specify a directory with a set of 512x512 images</p>")
train_embedding_name = gr.Dropdown(label='Embedding', choices=sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys()))
learn_rate = gr.Number(label='Learning rate', value=5.0e-03)
dataset_directory = gr.Textbox(label='Dataset directory', placeholder="Path to directory with input images")
log_directory = gr.Textbox(label='Log directory', placeholder="Path to directory where to write outputs", value="textual_inversion")
template_file = gr.Textbox(label='Prompt template file', value=os.path.join(script_path, "textual_inversion_templates", "style_filewords.txt"))
steps = gr.Number(label='Max steps', value=100000, precision=0)
create_image_every = gr.Number(label='Save an image to log directory every N steps, 0 to disable', value=1000, precision=0)
save_embedding_every = gr.Number(label='Save a copy of embedding to log directory every N steps, 0 to disable', value=1000, precision=0)
with gr.Row():
with gr.Column(scale=2):
gr.HTML(value="")
with gr.Column():
with gr.Row():
interrupt_training = gr.Button(value="Interrupt")
train_embedding = gr.Button(value="Train", variant='primary')
with gr.Column():
progressbar = gr.HTML(elem_id="ti_progressbar")
ti_output = gr.Text(elem_id="ti_output", value="", show_label=False)
ti_gallery = gr.Gallery(label='Output', show_label=False, elem_id='ti_gallery').style(grid=4)
ti_preview = gr.Image(elem_id='ti_preview', visible=False)
ti_progress = gr.HTML(elem_id="ti_progress", value="")
ti_outcome = gr.HTML(elem_id="ti_error", value="")
setup_progressbar(progressbar, ti_preview, 'ti', textinfo=ti_progress)
create_embedding.click(
fn=modules.textual_inversion.ui.create_embedding,
inputs=[
new_embedding_name,
initialization_text,
nvpt,
],
outputs=[
train_embedding_name,
ti_output,
ti_outcome,
]
)
train_embedding.click(
fn=wrap_gradio_gpu_call(modules.textual_inversion.ui.train_embedding, extra_outputs=[gr.update()]),
_js="start_training_textual_inversion",
inputs=[
train_embedding_name,
learn_rate,
dataset_directory,
log_directory,
steps,
create_image_every,
save_embedding_every,
template_file,
],
outputs=[
ti_output,
ti_outcome,
]
)
interrupt_training.click(
fn=lambda: shared.state.interrupt(),
inputs=[],
outputs=[],
)
def create_setting_component(key):
def fun():
return opts.data[key] if key in opts.data else opts.data_labels[key].default
info = opts.data_labels[key]
t = type(info.default)
args = info.component_args() if callable(info.component_args) else info.component_args
if info.component is not None:
comp = info.component
elif t == str:
comp = gr.Textbox
elif t == int:
comp = gr.Number
elif t == bool:
comp = gr.Checkbox
else:
raise Exception(f'bad options item type: {str(t)} for key {key}')
return comp(label=info.label, value=fun, **(args or {}))
components = []
component_dict = {}
def open_folder(f):
if not shared.cmd_opts.hide_ui_dir_config:
path = os.path.normpath(f)
if platform.system() == "Windows":
os.startfile(path)
elif platform.system() == "Darwin":
sp.Popen(["open", path])
else:
sp.Popen(["xdg-open", path])
def run_settings(*args):
changed = 0
for key, value, comp in zip(opts.data_labels.keys(), args, components):
if not opts.same_type(value, opts.data_labels[key].default):
return f"Bad value for setting {key}: {value}; expecting {type(opts.data_labels[key].default).__name__}"
for key, value, comp in zip(opts.data_labels.keys(), args, components):
comp_args = opts.data_labels[key].component_args
if comp_args and isinstance(comp_args, dict) and comp_args.get('visible') is False:
continue
oldval = opts.data.get(key, None)
opts.data[key] = value
if oldval != value:
if opts.data_labels[key].onchange is not None:
opts.data_labels[key].onchange()
changed += 1
opts.save(shared.config_filename)
return f'{changed} settings changed.', opts.dumpjson()
with gr.Blocks(analytics_enabled=False) as settings_interface:
settings_submit = gr.Button(value="Apply settings", variant='primary')
result = gr.HTML()
settings_cols = 3
items_per_col = int(len(opts.data_labels) * 0.9 / settings_cols)
cols_displayed = 0
items_displayed = 0
previous_section = None
column = None
with gr.Row(elem_id="settings").style(equal_height=False):
for i, (k, item) in enumerate(opts.data_labels.items()):
if previous_section != item.section:
if cols_displayed < settings_cols and (items_displayed >= items_per_col or previous_section is None):
if column is not None:
column.__exit__()
column = gr.Column(variant='panel')
column.__enter__()
items_displayed = 0
cols_displayed += 1
previous_section = item.section
gr.HTML(elem_id="settings_header_text_{}".format(item.section[0]), value='<h1 class="gr-button-lg">{}</h1>'.format(item.section[1]))
component = create_setting_component(k)
component_dict[k] = component
components.append(component)
items_displayed += 1
request_notifications = gr.Button(value='Request browser notifications', elem_id="request_notifications")
request_notifications.click(
fn=lambda: None,
inputs=[],
outputs=[],
_js='function(){}'
)
if column is not None:
column.__exit__()
interfaces = [
(txt2img_interface, "txt2img", "txt2img"),
(img2img_interface, "img2img", "img2img"),
(extras_interface, "Extras", "extras"),
(pnginfo_interface, "PNG Info", "pnginfo"),
(modelmerger_interface, "Checkpoint Merger", "modelmerger"),
(textual_inversion_interface, "Textual inversion", "ti"),
(settings_interface, "Settings", "settings"),
]
with open(os.path.join(script_path, "style.css"), "r", encoding="utf8") as file:
css = file.read()
if os.path.exists(os.path.join(script_path, "user.css")):
with open(os.path.join(script_path, "user.css"), "r", encoding="utf8") as file:
usercss = file.read()
css += usercss
if not cmd_opts.no_progressbar_hiding:
css += css_hide_progressbar
with gr.Blocks(css=css, analytics_enabled=False, title="Stable Diffusion") as demo:
with gr.Tabs() as tabs:
for interface, label, ifid in interfaces:
with gr.TabItem(label, id=ifid):
interface.render()
if os.path.exists(os.path.join(script_path, "notification.mp3")):
audio_notification = gr.Audio(interactive=False, value=os.path.join(script_path, "notification.mp3"), elem_id="audio_notification", visible=False)
text_settings = gr.Textbox(elem_id="settings_json", value=lambda: opts.dumpjson(), visible=False)
settings_submit.click(
fn=run_settings,
inputs=components,
outputs=[result, text_settings],
)
def modelmerger(*args):
try:
results = modules.extras.run_modelmerger(*args)
except Exception as e:
print("Error loading/saving model file:", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
modules.sd_models.list_models() # to remove the potentially missing models from the list
return ["Error loading/saving model file. It doesn't exist or the name contains illegal characters"] + [gr.Dropdown.update(choices=modules.sd_models.checkpoint_tiles()) for _ in range(3)]
return results
modelmerger_merge.click(
fn=modelmerger,
inputs=[
primary_model_name,
secondary_model_name,
interp_method,
interp_amount,
save_as_half,
custom_name,
],
outputs=[
submit_result,
primary_model_name,
secondary_model_name,
component_dict['sd_model_checkpoint'],
]
)
paste_field_names = ['Prompt', 'Negative prompt', 'Steps', 'Face restoration', 'Seed', 'Size-1', 'Size-2']
txt2img_fields = [field for field,name in txt2img_paste_fields if name in paste_field_names]
img2img_fields = [field for field,name in img2img_paste_fields if name in paste_field_names]
send_to_img2img.click(
fn=lambda img, *args: (image_from_url_text(img),*args),
_js="(gallery, ...args) => [extract_image_from_gallery_img2img(gallery), ...args]",
inputs=[txt2img_gallery] + txt2img_fields,
outputs=[init_img] + img2img_fields,
)
send_to_inpaint.click(
fn=lambda x, *args: (image_from_url_text(x), *args),
_js="(gallery, ...args) => [extract_image_from_gallery_inpaint(gallery), ...args]",
inputs=[txt2img_gallery] + txt2img_fields,
outputs=[init_img_with_mask] + img2img_fields,
)
img2img_send_to_img2img.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_img2img",
inputs=[img2img_gallery],
outputs=[init_img],
)
img2img_send_to_inpaint.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_inpaint",
inputs=[img2img_gallery],
outputs=[init_img_with_mask],
)
send_to_extras.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_extras",
inputs=[txt2img_gallery],
outputs=[extras_image],
)
open_txt2img_folder.click(
fn=lambda: open_folder(opts.outdir_samples or opts.outdir_txt2img_samples),
inputs=[],
outputs=[],
)
open_img2img_folder.click(
fn=lambda: open_folder(opts.outdir_samples or opts.outdir_img2img_samples),
inputs=[],
outputs=[],
)
open_extras_folder.click(
fn=lambda: open_folder(opts.outdir_samples or opts.outdir_extras_samples),
inputs=[],
outputs=[],
)
img2img_send_to_extras.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_extras",
inputs=[img2img_gallery],
outputs=[extras_image],
)
modules.generation_parameters_copypaste.connect_paste(pnginfo_send_to_txt2img, txt2img_paste_fields, generation_info, 'switch_to_txt2img')
modules.generation_parameters_copypaste.connect_paste(pnginfo_send_to_img2img, img2img_paste_fields, generation_info, 'switch_to_img2img_img2img')
ui_config_file = cmd_opts.ui_config_file
ui_settings = {}
settings_count = len(ui_settings)
error_loading = False
try:
if os.path.exists(ui_config_file):
with open(ui_config_file, "r", encoding="utf8") as file:
ui_settings = json.load(file)
except Exception:
error_loading = True
print("Error loading settings:", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
def loadsave(path, x):
def apply_field(obj, field, condition=None):
key = path + "/" + field
if getattr(obj,'custom_script_source',None) is not None:
key = 'customscript/' + obj.custom_script_source + '/' + key
if getattr(obj, 'do_not_save_to_config', False):
return
saved_value = ui_settings.get(key, None)
if saved_value is None:
ui_settings[key] = getattr(obj, field)
elif condition is None or condition(saved_value):
setattr(obj, field, saved_value)
if type(x) in [gr.Slider, gr.Radio, gr.Checkbox, gr.Textbox, gr.Number] and x.visible:
apply_field(x, 'visible')
if type(x) == gr.Slider:
apply_field(x, 'value')
apply_field(x, 'minimum')
apply_field(x, 'maximum')
apply_field(x, 'step')
if type(x) == gr.Radio:
apply_field(x, 'value', lambda val: val in x.choices)
if type(x) == gr.Checkbox:
apply_field(x, 'value')
if type(x) == gr.Textbox:
apply_field(x, 'value')
if type(x) == gr.Number:
apply_field(x, 'value')
visit(txt2img_interface, loadsave, "txt2img")
visit(img2img_interface, loadsave, "img2img")
visit(extras_interface, loadsave, "extras")
if not error_loading and (not os.path.exists(ui_config_file) or settings_count != len(ui_settings)):
with open(ui_config_file, "w", encoding="utf8") as file:
json.dump(ui_settings, file, indent=4)
return demo
with open(os.path.join(script_path, "script.js"), "r", encoding="utf8") as jsfile:
javascript = f'<script>{jsfile.read()}</script>'
jsdir = os.path.join(script_path, "javascript")
for filename in sorted(os.listdir(jsdir)):
with open(os.path.join(jsdir, filename), "r", encoding="utf8") as jsfile:
javascript += f"\n<script>{jsfile.read()}</script>"
def template_response(*args, **kwargs):
res = gradio_routes_templates_response(*args, **kwargs)
res.body = res.body.replace(b'</head>', f'{javascript}</head>'.encode("utf8"))
res.init_headers()
return res
gradio_routes_templates_response = gradio.routes.templates.TemplateResponse
gradio.routes.templates.TemplateResponse = template_response
<|code_end|>
webui.py
<|code_start|>import os
import signal
import threading
import modules.codeformer_model as codeformer
import modules.extras
import modules.face_restoration
import modules.gfpgan_model as gfpgan
import modules.img2img
import modules.lowvram
import modules.paths
import modules.scripts
import modules.sd_hijack
import modules.sd_models
import modules.shared as shared
import modules.txt2img
import modules.ui
from modules import devices
from modules import modelloader
from modules.paths import script_path
from modules.shared import cmd_opts
modelloader.cleanup_models()
modules.sd_models.setup_model()
codeformer.setup_model(cmd_opts.codeformer_models_path)
gfpgan.setup_model(cmd_opts.gfpgan_models_path)
shared.face_restorers.append(modules.face_restoration.FaceRestoration())
modelloader.load_upscalers()
queue_lock = threading.Lock()
def wrap_queued_call(func):
def f(*args, **kwargs):
with queue_lock:
res = func(*args, **kwargs)
return res
return f
def wrap_gradio_gpu_call(func, extra_outputs=None):
def f(*args, **kwargs):
devices.torch_gc()
shared.state.sampling_step = 0
shared.state.job_count = -1
shared.state.job_no = 0
shared.state.job_timestamp = shared.state.get_job_timestamp()
shared.state.current_latent = None
shared.state.current_image = None
shared.state.current_image_sampling_step = 0
shared.state.interrupted = False
shared.state.textinfo = None
with queue_lock:
res = func(*args, **kwargs)
shared.state.job = ""
shared.state.job_count = 0
devices.torch_gc()
return res
return modules.ui.wrap_gradio_call(f, extra_outputs=extra_outputs)
modules.scripts.load_scripts(os.path.join(script_path, "scripts"))
shared.sd_model = modules.sd_models.load_model()
shared.opts.onchange("sd_model_checkpoint", wrap_queued_call(lambda: modules.sd_models.reload_model_weights(shared.sd_model)))
def webui():
# make the program just exit at ctrl+c without waiting for anything
def sigint_handler(sig, frame):
print(f'Interrupted with signal {sig} in {frame}')
os._exit(0)
signal.signal(signal.SIGINT, sigint_handler)
demo = modules.ui.create_ui(wrap_gradio_gpu_call=wrap_gradio_gpu_call)
demo.launch(
share=cmd_opts.share,
server_name="0.0.0.0" if cmd_opts.listen else None,
server_port=cmd_opts.port,
debug=cmd_opts.gradio_debug,
auth=[tuple(cred.split(':')) for cred in cmd_opts.gradio_auth.strip('"').split(',')] if cmd_opts.gradio_auth else None,
inbrowser=cmd_opts.autolaunch,
)
if __name__ == "__main__":
webui()
<|code_end|>
|
modules/scripts.py
<|code_start|>import os
import sys
import traceback
import modules.ui as ui
import gradio as gr
from modules.processing import StableDiffusionProcessing
from modules import shared
class Script:
filename = None
args_from = None
args_to = None
# The title of the script. This is what will be displayed in the dropdown menu.
def title(self):
raise NotImplementedError()
# How the script is displayed in the UI. See https://gradio.app/docs/#components
# for the different UI components you can use and how to create them.
# Most UI components can return a value, such as a boolean for a checkbox.
# The returned values are passed to the run method as parameters.
def ui(self, is_img2img):
pass
# Determines when the script should be shown in the dropdown menu via the
# returned value. As an example:
# is_img2img is True if the current tab is img2img, and False if it is txt2img.
# Thus, return is_img2img to only show the script on the img2img tab.
def show(self, is_img2img):
return True
# This is where the additional processing is implemented. The parameters include
# self, the model object "p" (a StableDiffusionProcessing class, see
# processing.py), and the parameters returned by the ui method.
# Custom functions can be defined here, and additional libraries can be imported
# to be used in processing. The return value should be a Processed object, which is
# what is returned by the process_images method.
def run(self, *args):
raise NotImplementedError()
# The description method is currently unused.
# To add a description that appears when hovering over the title, amend the "titles"
# dict in script.js to include the script title (returned by title) as a key, and
# your description as the value.
def describe(self):
return ""
scripts_data = []
def load_scripts(basedir):
if not os.path.exists(basedir):
return
for filename in sorted(os.listdir(basedir)):
path = os.path.join(basedir, filename)
if not os.path.isfile(path):
continue
try:
with open(path, "r", encoding="utf8") as file:
text = file.read()
from types import ModuleType
compiled = compile(text, path, 'exec')
module = ModuleType(filename)
exec(compiled, module.__dict__)
for key, script_class in module.__dict__.items():
if type(script_class) == type and issubclass(script_class, Script):
scripts_data.append((script_class, path))
except Exception:
print(f"Error loading script: {filename}", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
def wrap_call(func, filename, funcname, *args, default=None, **kwargs):
try:
res = func(*args, **kwargs)
return res
except Exception:
print(f"Error calling: {filename}/{funcname}", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
return default
class ScriptRunner:
def __init__(self):
self.scripts = []
def setup_ui(self, is_img2img):
for script_class, path in scripts_data:
script = script_class()
script.filename = path
if not script.show(is_img2img):
continue
self.scripts.append(script)
titles = [wrap_call(script.title, script.filename, "title") or f"{script.filename} [error]" for script in self.scripts]
dropdown = gr.Dropdown(label="Script", choices=["None"] + titles, value="None", type="index")
inputs = [dropdown]
for script in self.scripts:
script.args_from = len(inputs)
script.args_to = len(inputs)
controls = wrap_call(script.ui, script.filename, "ui", is_img2img)
if controls is None:
continue
for control in controls:
control.custom_script_source = os.path.basename(script.filename)
control.visible = False
inputs += controls
script.args_to = len(inputs)
def select_script(script_index):
if 0 < script_index <= len(self.scripts):
script = self.scripts[script_index-1]
args_from = script.args_from
args_to = script.args_to
else:
args_from = 0
args_to = 0
return [ui.gr_show(True if i == 0 else args_from <= i < args_to) for i in range(len(inputs))]
dropdown.change(
fn=select_script,
inputs=[dropdown],
outputs=inputs
)
return inputs
def run(self, p: StableDiffusionProcessing, *args):
script_index = args[0]
if script_index == 0:
return None
script = self.scripts[script_index-1]
if script is None:
return None
script_args = args[script.args_from:script.args_to]
processed = script.run(p, *script_args)
shared.total_tqdm.clear()
return processed
def reload_sources(self):
for si, script in list(enumerate(self.scripts)):
with open(script.filename, "r", encoding="utf8") as file:
args_from = script.args_from
args_to = script.args_to
filename = script.filename
text = file.read()
from types import ModuleType
compiled = compile(text, filename, 'exec')
module = ModuleType(script.filename)
exec(compiled, module.__dict__)
for key, script_class in module.__dict__.items():
if type(script_class) == type and issubclass(script_class, Script):
self.scripts[si] = script_class()
self.scripts[si].filename = filename
self.scripts[si].args_from = args_from
self.scripts[si].args_to = args_to
scripts_txt2img = ScriptRunner()
scripts_img2img = ScriptRunner()
def reload_script_body_only():
scripts_txt2img.reload_sources()
scripts_img2img.reload_sources()
def reload_scripts(basedir):
global scripts_txt2img, scripts_img2img
scripts_data.clear()
load_scripts(basedir)
scripts_txt2img = ScriptRunner()
scripts_img2img = ScriptRunner()
<|code_end|>
modules/ui.py
<|code_start|>import base64
import html
import io
import json
import math
import mimetypes
import os
import random
import sys
import time
import traceback
import platform
import subprocess as sp
from functools import reduce
import numpy as np
import torch
from PIL import Image, PngImagePlugin
import piexif
import gradio as gr
import gradio.utils
import gradio.routes
from modules import sd_hijack
from modules.paths import script_path
from modules.shared import opts, cmd_opts
import modules.shared as shared
from modules.sd_samplers import samplers, samplers_for_img2img
from modules.sd_hijack import model_hijack
import modules.ldsr_model
import modules.scripts
import modules.gfpgan_model
import modules.codeformer_model
import modules.styles
import modules.generation_parameters_copypaste
from modules.prompt_parser import get_learned_conditioning_prompt_schedules
from modules.images import apply_filename_pattern, get_next_sequence_number
import modules.textual_inversion.ui
# this is a fix for Windows users. Without it, javascript files will be served with text/html content-type and the browser will not show any UI
mimetypes.init()
mimetypes.add_type('application/javascript', '.js')
if not cmd_opts.share and not cmd_opts.listen:
# fix gradio phoning home
gradio.utils.version_check = lambda: None
gradio.utils.get_local_ip_address = lambda: '127.0.0.1'
def gr_show(visible=True):
return {"visible": visible, "__type__": "update"}
sample_img2img = "assets/stable-samples/img2img/sketch-mountains-input.jpg"
sample_img2img = sample_img2img if os.path.exists(sample_img2img) else None
css_hide_progressbar = """
.wrap .m-12 svg { display:none!important; }
.wrap .m-12::before { content:"Loading..." }
.progress-bar { display:none!important; }
.meta-text { display:none!important; }
"""
# Using constants for these since the variation selector isn't visible.
# Important that they exactly match script.js for tooltip to work.
random_symbol = '\U0001f3b2\ufe0f' # 🎲️
reuse_symbol = '\u267b\ufe0f' # ♻️
art_symbol = '\U0001f3a8' # 🎨
paste_symbol = '\u2199\ufe0f' # ↙
folder_symbol = '\U0001f4c2'  # 📂 (single code point; a surrogate-pair escape cannot be encoded to UTF-8)
def plaintext_to_html(text):
text = "<p>" + "<br>\n".join([f"{html.escape(x)}" for x in text.split('\n')]) + "</p>"
return text
def image_from_url_text(filedata):
if type(filedata) == list:
if len(filedata) == 0:
return None
filedata = filedata[0]
if filedata.startswith("data:image/png;base64,"):
filedata = filedata[len("data:image/png;base64,"):]
filedata = base64.decodebytes(filedata.encode('utf-8'))
image = Image.open(io.BytesIO(filedata))
return image
def send_gradio_gallery_to_image(x):
if len(x) == 0:
return None
return image_from_url_text(x[0])
def save_files(js_data, images, index):
import csv
filenames = []
# quick dictionary to class object conversion; it's necessary because apply_filename_pattern requires an object
class MyObject:
def __init__(self, d=None):
if d is not None:
for key, value in d.items():
setattr(self, key, value)
data = json.loads(js_data)
p = MyObject(data)
path = opts.outdir_save
save_to_dirs = opts.save_to_dirs
if save_to_dirs:
dirname = apply_filename_pattern(opts.directories_filename_pattern or "[prompt_words]", p, p.seed, p.prompt)
path = os.path.join(opts.outdir_save, dirname)
os.makedirs(path, exist_ok=True)
if index > -1 and opts.save_selected_only and (index >= data["index_of_first_image"]): # ensures we are looking at a specific non-grid picture, and we have save_selected_only
images = [images[index]]
infotexts = [data["infotexts"][index]]
else:
infotexts = data["infotexts"]
with open(os.path.join(opts.outdir_save, "log.csv"), "a", encoding="utf8", newline='') as file:
at_start = file.tell() == 0
writer = csv.writer(file)
if at_start:
writer.writerow(["prompt", "seed", "width", "height", "sampler", "cfgs", "steps", "filename", "negative_prompt"])
file_decoration = opts.samples_filename_pattern or "[seed]-[prompt_spaces]"
if file_decoration != "":
file_decoration = "-" + file_decoration.lower()
file_decoration = apply_filename_pattern(file_decoration, p, p.seed, p.prompt)
truncated = (file_decoration[:240] + '..') if len(file_decoration) > 240 else file_decoration
filename_base = truncated
extension = opts.samples_format.lower()
basecount = get_next_sequence_number(path, "")
for i, filedata in enumerate(images):
file_number = f"{basecount+i:05}"
filename = file_number + filename_base + f".{extension}"
filepath = os.path.join(path, filename)
if filedata.startswith("data:image/png;base64,"):
filedata = filedata[len("data:image/png;base64,"):]
image = Image.open(io.BytesIO(base64.decodebytes(filedata.encode('utf-8'))))
if opts.enable_pnginfo and extension == 'png':
pnginfo = PngImagePlugin.PngInfo()
pnginfo.add_text('parameters', infotexts[i])
image.save(filepath, pnginfo=pnginfo)
else:
image.save(filepath, quality=opts.jpeg_quality)
if opts.enable_pnginfo and extension in ("jpg", "jpeg", "webp"):
piexif.insert(piexif.dump({"Exif": {
piexif.ExifIFD.UserComment: piexif.helper.UserComment.dump(infotexts[i], encoding="unicode")
}}), filepath)
filenames.append(filename)
writer.writerow([data["prompt"], data["seed"], data["width"], data["height"], data["sampler"], data["cfg_scale"], data["steps"], filenames[0], data["negative_prompt"]])
return '', '', plaintext_to_html(f"Saved: {filenames[0]}")
def wrap_gradio_call(func, extra_outputs=None):
def f(*args, extra_outputs_array=extra_outputs, **kwargs):
run_memmon = opts.memmon_poll_rate > 0 and not shared.mem_mon.disabled
if run_memmon:
shared.mem_mon.monitor()
t = time.perf_counter()
try:
res = list(func(*args, **kwargs))
except Exception as e:
print("Error completing request", file=sys.stderr)
print("Arguments:", args, kwargs, file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
shared.state.job = ""
shared.state.job_count = 0
if extra_outputs_array is None:
extra_outputs_array = [None, '']
res = extra_outputs_array + [f"<div class='error'>{plaintext_to_html(type(e).__name__+': '+str(e))}</div>"]
elapsed = time.perf_counter() - t
if run_memmon:
mem_stats = {k: -(v//-(1024*1024)) for k, v in shared.mem_mon.stop().items()}
active_peak = mem_stats['active_peak']
reserved_peak = mem_stats['reserved_peak']
sys_peak = mem_stats['system_peak']
sys_total = mem_stats['total']
sys_pct = round(sys_peak/max(sys_total, 1) * 100, 2)
vram_html = f"<p class='vram'>Torch active/reserved: {active_peak}/{reserved_peak} MiB, <wbr>Sys VRAM: {sys_peak}/{sys_total} MiB ({sys_pct}%)</p>"
else:
vram_html = ''
# last item is always HTML
res[-1] += f"<div class='performance'><p class='time'>Time taken: <wbr>{elapsed:.2f}s</p>{vram_html}</div>"
shared.state.interrupted = False
shared.state.job_count = 0
return tuple(res)
return f
def check_progress_call(id_part):
if shared.state.job_count == 0:
return "", gr_show(False), gr_show(False), gr_show(False)
progress = 0
if shared.state.job_count > 0:
progress += shared.state.job_no / shared.state.job_count
if shared.state.sampling_steps > 0:
progress += 1 / shared.state.job_count * shared.state.sampling_step / shared.state.sampling_steps
progress = min(progress, 1)
progressbar = ""
if opts.show_progressbar:
progressbar = f"""<div class='progressDiv'><div class='progress' style="width:{progress * 100}%">{str(int(progress*100))+"%" if progress > 0.01 else ""}</div></div>"""
image = gr_show(False)
preview_visibility = gr_show(False)
if opts.show_progress_every_n_steps > 0:
if shared.parallel_processing_allowed:
if shared.state.sampling_step - shared.state.current_image_sampling_step >= opts.show_progress_every_n_steps and shared.state.current_latent is not None:
shared.state.current_image = modules.sd_samplers.sample_to_image(shared.state.current_latent)
shared.state.current_image_sampling_step = shared.state.sampling_step
image = shared.state.current_image
if image is None:
image = gr.update(value=None)
else:
preview_visibility = gr_show(True)
if shared.state.textinfo is not None:
textinfo_result = gr.HTML.update(value=shared.state.textinfo, visible=True)
else:
textinfo_result = gr_show(False)
return f"<span id='{id_part}_progress_span' style='display: none'>{time.time()}</span><p>{progressbar}</p>", preview_visibility, image, textinfo_result
def check_progress_call_initial(id_part):
shared.state.job_count = -1
shared.state.current_latent = None
shared.state.current_image = None
shared.state.textinfo = None
return check_progress_call(id_part)
def roll_artist(prompt):
allowed_cats = set([x for x in shared.artist_db.categories() if len(opts.random_artist_categories)==0 or x in opts.random_artist_categories])
artist = random.choice([x for x in shared.artist_db.artists if x.category in allowed_cats])
return prompt + ", " + artist.name if prompt != '' else artist.name
def visit(x, func, path=""):
if hasattr(x, 'children'):
for c in x.children:
visit(c, func, path)
elif x.label is not None:
func(path + "/" + str(x.label), x)
def add_style(name: str, prompt: str, negative_prompt: str):
if name is None:
return [gr_show(), gr_show()]
style = modules.styles.PromptStyle(name, prompt, negative_prompt)
shared.prompt_styles.styles[style.name] = style
# Save all loaded prompt styles: this allows us to update the storage format in the future more easily, because we
# reserialize all styles every time we save them
shared.prompt_styles.save_styles(shared.styles_filename)
return [gr.Dropdown.update(visible=True, choices=list(shared.prompt_styles.styles)) for _ in range(4)]
def apply_styles(prompt, prompt_neg, style1_name, style2_name):
prompt = shared.prompt_styles.apply_styles_to_prompt(prompt, [style1_name, style2_name])
prompt_neg = shared.prompt_styles.apply_negative_styles_to_prompt(prompt_neg, [style1_name, style2_name])
return [gr.Textbox.update(value=prompt), gr.Textbox.update(value=prompt_neg), gr.Dropdown.update(value="None"), gr.Dropdown.update(value="None")]
def interrogate(image):
prompt = shared.interrogator.interrogate(image)
return gr_show(True) if prompt is None else prompt
def create_seed_inputs():
with gr.Row():
with gr.Box():
with gr.Row(elem_id='seed_row'):
seed = (gr.Textbox if cmd_opts.use_textbox_seed else gr.Number)(label='Seed', value=-1)
seed.style(container=False)
random_seed = gr.Button(random_symbol, elem_id='random_seed')
reuse_seed = gr.Button(reuse_symbol, elem_id='reuse_seed')
with gr.Box(elem_id='subseed_show_box'):
seed_checkbox = gr.Checkbox(label='Extra', elem_id='subseed_show', value=False)
# Components to show/hide based on the 'Extra' checkbox
seed_extras = []
with gr.Row(visible=False) as seed_extra_row_1:
seed_extras.append(seed_extra_row_1)
with gr.Box():
with gr.Row(elem_id='subseed_row'):
subseed = gr.Number(label='Variation seed', value=-1)
subseed.style(container=False)
random_subseed = gr.Button(random_symbol, elem_id='random_subseed')
reuse_subseed = gr.Button(reuse_symbol, elem_id='reuse_subseed')
subseed_strength = gr.Slider(label='Variation strength', value=0.0, minimum=0, maximum=1, step=0.01)
with gr.Row(visible=False) as seed_extra_row_2:
seed_extras.append(seed_extra_row_2)
seed_resize_from_w = gr.Slider(minimum=0, maximum=2048, step=64, label="Resize seed from width", value=0)
seed_resize_from_h = gr.Slider(minimum=0, maximum=2048, step=64, label="Resize seed from height", value=0)
random_seed.click(fn=lambda: -1, show_progress=False, inputs=[], outputs=[seed])
random_subseed.click(fn=lambda: -1, show_progress=False, inputs=[], outputs=[subseed])
def change_visibility(show):
return {comp: gr_show(show) for comp in seed_extras}
seed_checkbox.change(change_visibility, show_progress=False, inputs=[seed_checkbox], outputs=seed_extras)
return seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox
def connect_reuse_seed(seed: gr.Number, reuse_seed: gr.Button, generation_info: gr.Textbox, dummy_component, is_subseed):
""" Connects a 'reuse (sub)seed' button's click event so that it copies last used
(sub)seed value from generation info to the seed field. If copying subseed and subseed strength
was 0, i.e. no variation seed was used, it copies the normal seed value instead."""
def copy_seed(gen_info_string: str, index):
res = -1
try:
gen_info = json.loads(gen_info_string)
index -= gen_info.get('index_of_first_image', 0)
if is_subseed and gen_info.get('subseed_strength', 0) > 0:
all_subseeds = gen_info.get('all_subseeds', [-1])
res = all_subseeds[index if 0 <= index < len(all_subseeds) else 0]
else:
all_seeds = gen_info.get('all_seeds', [-1])
res = all_seeds[index if 0 <= index < len(all_seeds) else 0]
except json.decoder.JSONDecodeError as e:
if gen_info_string != '':
print("Error parsing JSON generation info:", file=sys.stderr)
print(gen_info_string, file=sys.stderr)
return [res, gr_show(False)]
reuse_seed.click(
fn=copy_seed,
_js="(x, y) => [x, selected_gallery_index()]",
show_progress=False,
inputs=[generation_info, dummy_component],
outputs=[seed, dummy_component]
)
def update_token_counter(text, steps):
prompt_schedules = get_learned_conditioning_prompt_schedules([text], steps)
flat_prompts = reduce(lambda list1, list2: list1+list2, prompt_schedules)
prompts = [prompt_text for step,prompt_text in flat_prompts]
tokens, token_count, max_length = max([model_hijack.tokenize(prompt) for prompt in prompts], key=lambda args: args[1])
style_class = ' class="red"' if (token_count > max_length) else ""
return f"<span {style_class}>{token_count}/{max_length}</span>"
def create_toprow(is_img2img):
id_part = "img2img" if is_img2img else "txt2img"
with gr.Row(elem_id="toprow"):
with gr.Column(scale=4):
with gr.Row():
with gr.Column(scale=80):
with gr.Row():
prompt = gr.Textbox(label="Prompt", elem_id=f"{id_part}_prompt", show_label=False, placeholder="Prompt", lines=2)
with gr.Column(scale=1, elem_id="roll_col"):
roll = gr.Button(value=art_symbol, elem_id="roll", visible=len(shared.artist_db.artists) > 0)
paste = gr.Button(value=paste_symbol, elem_id="paste")
token_counter = gr.HTML(value="<span></span>", elem_id=f"{id_part}_token_counter")
token_button = gr.Button(visible=False, elem_id=f"{id_part}_token_button")
with gr.Column(scale=10, elem_id="style_pos_col"):
prompt_style = gr.Dropdown(label="Style 1", elem_id=f"{id_part}_style_index", choices=[k for k, v in shared.prompt_styles.styles.items()], value=next(iter(shared.prompt_styles.styles.keys())), visible=len(shared.prompt_styles.styles) > 1)
with gr.Row():
with gr.Column(scale=8):
negative_prompt = gr.Textbox(label="Negative prompt", elem_id="negative_prompt", show_label=False, placeholder="Negative prompt", lines=2)
with gr.Column(scale=1, elem_id="style_neg_col"):
prompt_style2 = gr.Dropdown(label="Style 2", elem_id=f"{id_part}_style2_index", choices=[k for k, v in shared.prompt_styles.styles.items()], value=next(iter(shared.prompt_styles.styles.keys())), visible=len(shared.prompt_styles.styles) > 1)
with gr.Column(scale=1):
with gr.Row():
interrupt = gr.Button('Interrupt', elem_id=f"{id_part}_interrupt")
submit = gr.Button('Generate', elem_id=f"{id_part}_generate", variant='primary')
interrupt.click(
fn=lambda: shared.state.interrupt(),
inputs=[],
outputs=[],
)
with gr.Row():
if is_img2img:
interrogate = gr.Button('Interrogate', elem_id="interrogate")
else:
interrogate = None
prompt_style_apply = gr.Button('Apply style', elem_id="style_apply")
save_style = gr.Button('Create style', elem_id="style_create")
return prompt, roll, prompt_style, negative_prompt, prompt_style2, submit, interrogate, prompt_style_apply, save_style, paste, token_counter, token_button
def setup_progressbar(progressbar, preview, id_part, textinfo=None):
if textinfo is None:
textinfo = gr.HTML(visible=False)
check_progress = gr.Button('Check progress', elem_id=f"{id_part}_check_progress", visible=False)
check_progress.click(
fn=lambda: check_progress_call(id_part),
show_progress=False,
inputs=[],
outputs=[progressbar, preview, preview, textinfo],
)
check_progress_initial = gr.Button('Check progress (first)', elem_id=f"{id_part}_check_progress_initial", visible=False)
check_progress_initial.click(
fn=lambda: check_progress_call_initial(id_part),
show_progress=False,
inputs=[],
outputs=[progressbar, preview, preview, textinfo],
)
def create_ui(wrap_gradio_gpu_call):
import modules.img2img
import modules.txt2img
with gr.Blocks(analytics_enabled=False) as txt2img_interface:
txt2img_prompt, roll, txt2img_prompt_style, txt2img_negative_prompt, txt2img_prompt_style2, submit, _, txt2img_prompt_style_apply, txt2img_save_style, paste, token_counter, token_button = create_toprow(is_img2img=False)
dummy_component = gr.Label(visible=False)
with gr.Row(elem_id='txt2img_progress_row'):
with gr.Column(scale=1):
pass
with gr.Column(scale=1):
progressbar = gr.HTML(elem_id="txt2img_progressbar")
txt2img_preview = gr.Image(elem_id='txt2img_preview', visible=False)
setup_progressbar(progressbar, txt2img_preview, 'txt2img')
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
steps = gr.Slider(minimum=1, maximum=150, step=1, label="Sampling Steps", value=20)
sampler_index = gr.Radio(label='Sampling method', elem_id="txt2img_sampling", choices=[x.name for x in samplers], value=samplers[0].name, type="index")
with gr.Group():
width = gr.Slider(minimum=64, maximum=2048, step=64, label="Width", value=512)
height = gr.Slider(minimum=64, maximum=2048, step=64, label="Height", value=512)
with gr.Row():
restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1)
tiling = gr.Checkbox(label='Tiling', value=False)
enable_hr = gr.Checkbox(label='Highres. fix', value=False)
with gr.Row(visible=False) as hr_options:
scale_latent = gr.Checkbox(label='Scale latent', value=False)
denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7)
with gr.Row():
batch_count = gr.Slider(minimum=1, maximum=cmd_opts.max_batch_count, step=1, label='Batch count', value=1)
batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1)
cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0)
seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs()
with gr.Group():
custom_inputs = modules.scripts.scripts_txt2img.setup_ui(is_img2img=False)
with gr.Column(variant='panel'):
with gr.Group():
txt2img_preview = gr.Image(elem_id='txt2img_preview', visible=False)
txt2img_gallery = gr.Gallery(label='Output', show_label=False, elem_id='txt2img_gallery').style(grid=4)
with gr.Group():
with gr.Row():
save = gr.Button('Save')
send_to_img2img = gr.Button('Send to img2img')
send_to_inpaint = gr.Button('Send to inpaint')
send_to_extras = gr.Button('Send to extras')
button_id = "hidden_element" if shared.cmd_opts.hide_ui_dir_config else 'open_folder'
open_txt2img_folder = gr.Button(folder_symbol, elem_id=button_id)
with gr.Group():
html_info = gr.HTML()
generation_info = gr.Textbox(visible=False)
connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False)
connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True)
txt2img_args = dict(
fn=wrap_gradio_gpu_call(modules.txt2img.txt2img),
_js="submit",
inputs=[
txt2img_prompt,
txt2img_negative_prompt,
txt2img_prompt_style,
txt2img_prompt_style2,
steps,
sampler_index,
restore_faces,
tiling,
batch_count,
batch_size,
cfg_scale,
seed,
subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox,
height,
width,
enable_hr,
scale_latent,
denoising_strength,
] + custom_inputs,
outputs=[
txt2img_gallery,
generation_info,
html_info
],
show_progress=False,
)
txt2img_prompt.submit(**txt2img_args)
submit.click(**txt2img_args)
enable_hr.change(
fn=lambda x: gr_show(x),
inputs=[enable_hr],
outputs=[hr_options],
)
save.click(
fn=wrap_gradio_call(save_files),
_js="(x, y, z) => [x, y, selected_gallery_index()]",
inputs=[
generation_info,
txt2img_gallery,
html_info,
],
outputs=[
html_info,
html_info,
html_info,
]
)
roll.click(
fn=roll_artist,
_js="update_txt2img_tokens",
inputs=[
txt2img_prompt,
],
outputs=[
txt2img_prompt,
]
)
txt2img_paste_fields = [
(txt2img_prompt, "Prompt"),
(txt2img_negative_prompt, "Negative prompt"),
(steps, "Steps"),
(sampler_index, "Sampler"),
(restore_faces, "Face restoration"),
(cfg_scale, "CFG scale"),
(seed, "Seed"),
(width, "Size-1"),
(height, "Size-2"),
(batch_size, "Batch size"),
(subseed, "Variation seed"),
(subseed_strength, "Variation seed strength"),
(seed_resize_from_w, "Seed resize from-1"),
(seed_resize_from_h, "Seed resize from-2"),
(denoising_strength, "Denoising strength"),
(enable_hr, lambda d: "Denoising strength" in d),
(hr_options, lambda d: gr.Row.update(visible="Denoising strength" in d)),
]
modules.generation_parameters_copypaste.connect_paste(paste, txt2img_paste_fields, txt2img_prompt)
token_button.click(fn=update_token_counter, inputs=[txt2img_prompt, steps], outputs=[token_counter])
with gr.Blocks(analytics_enabled=False) as img2img_interface:
img2img_prompt, roll, img2img_prompt_style, img2img_negative_prompt, img2img_prompt_style2, submit, img2img_interrogate, img2img_prompt_style_apply, img2img_save_style, paste, token_counter, token_button = create_toprow(is_img2img=True)
with gr.Row(elem_id='img2img_progress_row'):
with gr.Column(scale=1):
pass
with gr.Column(scale=1):
progressbar = gr.HTML(elem_id="img2img_progressbar")
img2img_preview = gr.Image(elem_id='img2img_preview', visible=False)
setup_progressbar(progressbar, img2img_preview, 'img2img')
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
with gr.Tabs(elem_id="mode_img2img") as tabs_img2img_mode:
with gr.TabItem('img2img', id='img2img'):
init_img = gr.Image(label="Image for img2img", show_label=False, source="upload", interactive=True, type="pil")
with gr.TabItem('Inpaint', id='inpaint'):
init_img_with_mask = gr.Image(label="Image for inpainting with mask", show_label=False, elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool="sketch", image_mode="RGBA")
init_img_inpaint = gr.Image(label="Image for img2img", show_label=False, source="upload", interactive=True, type="pil", visible=False, elem_id="img_inpaint_base")
init_mask_inpaint = gr.Image(label="Mask", source="upload", interactive=True, type="pil", visible=False, elem_id="img_inpaint_mask")
mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4)
with gr.Row():
mask_mode = gr.Radio(label="Mask mode", show_label=False, choices=["Draw mask", "Upload mask"], type="index", value="Draw mask", elem_id="mask_mode")
inpainting_mask_invert = gr.Radio(label='Masking mode', show_label=False, choices=['Inpaint masked', 'Inpaint not masked'], value='Inpaint masked', type="index")
inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='original', type="index")
with gr.Row():
inpaint_full_res = gr.Checkbox(label='Inpaint at full resolution', value=False)
inpaint_full_res_padding = gr.Slider(label='Inpaint at full resolution padding, pixels', minimum=0, maximum=256, step=4, value=32)
with gr.TabItem('Batch img2img', id='batch'):
hidden = '<br>Disabled when launched with --hide-ui-dir-config.' if shared.cmd_opts.hide_ui_dir_config else ''
gr.HTML(f"<p class=\"text-gray-500\">Process images in a directory on the same machine where the server is running.{hidden}</p>")
img2img_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs)
img2img_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs)
with gr.Row():
resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", show_label=False, choices=["Just resize", "Crop and resize", "Resize and fill"], type="index", value="Just resize")
steps = gr.Slider(minimum=1, maximum=150, step=1, label="Sampling Steps", value=20)
sampler_index = gr.Radio(label='Sampling method', choices=[x.name for x in samplers_for_img2img], value=samplers_for_img2img[0].name, type="index")
with gr.Group():
width = gr.Slider(minimum=64, maximum=2048, step=64, label="Width", value=512)
height = gr.Slider(minimum=64, maximum=2048, step=64, label="Height", value=512)
with gr.Row():
restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1)
tiling = gr.Checkbox(label='Tiling', value=False)
with gr.Row():
batch_count = gr.Slider(minimum=1, maximum=cmd_opts.max_batch_count, step=1, label='Batch count', value=1)
batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1)
with gr.Group():
cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0)
denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75)
seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs()
with gr.Group():
custom_inputs = modules.scripts.scripts_img2img.setup_ui(is_img2img=True)
with gr.Column(variant='panel'):
with gr.Group():
img2img_preview = gr.Image(elem_id='img2img_preview', visible=False)
img2img_gallery = gr.Gallery(label='Output', show_label=False, elem_id='img2img_gallery').style(grid=4)
with gr.Group():
with gr.Row():
save = gr.Button('Save')
img2img_send_to_img2img = gr.Button('Send to img2img')
img2img_send_to_inpaint = gr.Button('Send to inpaint')
img2img_send_to_extras = gr.Button('Send to extras')
button_id = "hidden_element" if shared.cmd_opts.hide_ui_dir_config else 'open_folder'
open_img2img_folder = gr.Button(folder_symbol, elem_id=button_id)
with gr.Group():
html_info = gr.HTML()
generation_info = gr.Textbox(visible=False)
connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False)
connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True)
mask_mode.change(
lambda mode, img: {
init_img_with_mask: gr_show(mode == 0),
init_img_inpaint: gr_show(mode == 1),
init_mask_inpaint: gr_show(mode == 1),
},
inputs=[mask_mode, init_img_with_mask],
outputs=[
init_img_with_mask,
init_img_inpaint,
init_mask_inpaint,
],
)
img2img_args = dict(
fn=wrap_gradio_gpu_call(modules.img2img.img2img),
_js="submit_img2img",
inputs=[
dummy_component,
img2img_prompt,
img2img_negative_prompt,
img2img_prompt_style,
img2img_prompt_style2,
init_img,
init_img_with_mask,
init_img_inpaint,
init_mask_inpaint,
mask_mode,
steps,
sampler_index,
mask_blur,
inpainting_fill,
restore_faces,
tiling,
batch_count,
batch_size,
cfg_scale,
denoising_strength,
seed,
subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox,
height,
width,
resize_mode,
inpaint_full_res,
inpaint_full_res_padding,
inpainting_mask_invert,
img2img_batch_input_dir,
img2img_batch_output_dir,
] + custom_inputs,
outputs=[
img2img_gallery,
generation_info,
html_info
],
show_progress=False,
)
img2img_prompt.submit(**img2img_args)
submit.click(**img2img_args)
img2img_interrogate.click(
fn=interrogate,
inputs=[init_img],
outputs=[img2img_prompt],
)
save.click(
fn=wrap_gradio_call(save_files),
_js="(x, y, z) => [x, y, selected_gallery_index()]",
inputs=[
generation_info,
img2img_gallery,
html_info
],
outputs=[
html_info,
html_info,
html_info,
]
)
roll.click(
fn=roll_artist,
_js="update_img2img_tokens",
inputs=[
img2img_prompt,
],
outputs=[
img2img_prompt,
]
)
prompts = [(txt2img_prompt, txt2img_negative_prompt), (img2img_prompt, img2img_negative_prompt)]
style_dropdowns = [(txt2img_prompt_style, txt2img_prompt_style2), (img2img_prompt_style, img2img_prompt_style2)]
style_js_funcs = ["update_txt2img_tokens", "update_img2img_tokens"]
for button, (prompt, negative_prompt) in zip([txt2img_save_style, img2img_save_style], prompts):
button.click(
fn=add_style,
_js="ask_for_style_name",
# Have to pass empty dummy component here, because the JavaScript and Python function have to accept
# the same number of parameters, but we only know the style-name after the JavaScript prompt
inputs=[dummy_component, prompt, negative_prompt],
outputs=[txt2img_prompt_style, img2img_prompt_style, txt2img_prompt_style2, img2img_prompt_style2],
)
for button, (prompt, negative_prompt), (style1, style2), js_func in zip([txt2img_prompt_style_apply, img2img_prompt_style_apply], prompts, style_dropdowns, style_js_funcs):
button.click(
fn=apply_styles,
_js=js_func,
inputs=[prompt, negative_prompt, style1, style2],
outputs=[prompt, negative_prompt, style1, style2],
)
img2img_paste_fields = [
(img2img_prompt, "Prompt"),
(img2img_negative_prompt, "Negative prompt"),
(steps, "Steps"),
(sampler_index, "Sampler"),
(restore_faces, "Face restoration"),
(cfg_scale, "CFG scale"),
(seed, "Seed"),
(width, "Size-1"),
(height, "Size-2"),
(batch_size, "Batch size"),
(subseed, "Variation seed"),
(subseed_strength, "Variation seed strength"),
(seed_resize_from_w, "Seed resize from-1"),
(seed_resize_from_h, "Seed resize from-2"),
(denoising_strength, "Denoising strength"),
]
modules.generation_parameters_copypaste.connect_paste(paste, img2img_paste_fields, img2img_prompt)
token_button.click(fn=update_token_counter, inputs=[img2img_prompt, steps], outputs=[token_counter])
with gr.Blocks(analytics_enabled=False) as extras_interface:
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
with gr.Tabs(elem_id="mode_extras"):
with gr.TabItem('Single Image'):
extras_image = gr.Image(label="Source", source="upload", interactive=True, type="pil")
with gr.TabItem('Batch Process'):
image_batch = gr.File(label="Batch Process", file_count="multiple", interactive=True, type="file")
upscaling_resize = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Resize", value=2)
with gr.Group():
extras_upscaler_1 = gr.Radio(label='Upscaler 1', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index")
with gr.Group():
extras_upscaler_2 = gr.Radio(label='Upscaler 2', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index")
extras_upscaler_2_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="Upscaler 2 visibility", value=1)
with gr.Group():
gfpgan_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="GFPGAN visibility", value=0, interactive=modules.gfpgan_model.have_gfpgan)
with gr.Group():
codeformer_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="CodeFormer visibility", value=0, interactive=modules.codeformer_model.have_codeformer)
codeformer_weight = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="CodeFormer weight (0 = maximum effect, 1 = minimum effect)", value=0, interactive=modules.codeformer_model.have_codeformer)
submit = gr.Button('Generate', elem_id="extras_generate", variant='primary')
with gr.Column(variant='panel'):
result_images = gr.Gallery(label="Result", show_label=False)
html_info_x = gr.HTML()
html_info = gr.HTML()
extras_send_to_img2img = gr.Button('Send to img2img')
extras_send_to_inpaint = gr.Button('Send to inpaint')
button_id = "hidden_element" if shared.cmd_opts.hide_ui_dir_config else ''
open_extras_folder = gr.Button('Open output directory', elem_id=button_id)
submit.click(
fn=wrap_gradio_gpu_call(modules.extras.run_extras),
_js="get_extras_tab_index",
inputs=[
dummy_component,
extras_image,
image_batch,
gfpgan_visibility,
codeformer_visibility,
codeformer_weight,
upscaling_resize,
extras_upscaler_1,
extras_upscaler_2,
extras_upscaler_2_visibility,
],
outputs=[
result_images,
html_info_x,
html_info,
]
)
extras_send_to_img2img.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_img2img",
inputs=[result_images],
outputs=[init_img],
)
extras_send_to_inpaint.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_img2img",
inputs=[result_images],
outputs=[init_img_with_mask],
)
with gr.Blocks(analytics_enabled=False) as pnginfo_interface:
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
image = gr.Image(elem_id="pnginfo_image", label="Source", source="upload", interactive=True, type="pil")
with gr.Column(variant='panel'):
html = gr.HTML()
generation_info = gr.Textbox(visible=False)
html2 = gr.HTML()
with gr.Row():
pnginfo_send_to_txt2img = gr.Button('Send to txt2img')
pnginfo_send_to_img2img = gr.Button('Send to img2img')
image.change(
fn=wrap_gradio_call(modules.extras.run_pnginfo),
inputs=[image],
outputs=[html, generation_info, html2],
)
with gr.Blocks() as modelmerger_interface:
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
gr.HTML(value="<p>A merger of the two checkpoints will be generated in your <b>checkpoint</b> directory.</p>")
with gr.Row():
primary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_primary_model_name", label="Primary Model Name")
secondary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_secondary_model_name", label="Secondary Model Name")
custom_name = gr.Textbox(label="Custom Name (Optional)")
interp_amount = gr.Slider(minimum=0.0, maximum=1.0, step=0.05, label='Interpolation Amount', value=0.3)
interp_method = gr.Radio(choices=["Weighted Sum", "Sigmoid", "Inverse Sigmoid"], value="Weighted Sum", label="Interpolation Method")
save_as_half = gr.Checkbox(value=False, label="Save as float16")
modelmerger_merge = gr.Button(elem_id="modelmerger_merge", label="Merge", variant='primary')
with gr.Column(variant='panel'):
submit_result = gr.Textbox(elem_id="modelmerger_result", show_label=False)
sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings()
with gr.Blocks() as textual_inversion_interface:
with gr.Row().style(equal_height=False):
with gr.Column():
with gr.Group():
gr.HTML(value="<p style='margin-bottom: 0.7em'>Create a new embedding</p>")
new_embedding_name = gr.Textbox(label="Name")
initialization_text = gr.Textbox(label="Initialization text", value="*")
nvpt = gr.Slider(label="Number of vectors per token", minimum=1, maximum=75, step=1, value=1)
with gr.Row():
with gr.Column(scale=3):
gr.HTML(value="")
with gr.Column():
create_embedding = gr.Button(value="Create", variant='primary')
with gr.Group():
gr.HTML(value="<p style='margin-bottom: 0.7em'>Train an embedding; must specify a directory with a set of 512x512 images</p>")
train_embedding_name = gr.Dropdown(label='Embedding', choices=sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys()))
learn_rate = gr.Number(label='Learning rate', value=5.0e-03)
dataset_directory = gr.Textbox(label='Dataset directory', placeholder="Path to directory with input images")
log_directory = gr.Textbox(label='Log directory', placeholder="Path to directory where to write outputs", value="textual_inversion")
template_file = gr.Textbox(label='Prompt template file', value=os.path.join(script_path, "textual_inversion_templates", "style_filewords.txt"))
steps = gr.Number(label='Max steps', value=100000, precision=0)
create_image_every = gr.Number(label='Save an image to log directory every N steps, 0 to disable', value=1000, precision=0)
save_embedding_every = gr.Number(label='Save a copy of embedding to log directory every N steps, 0 to disable', value=1000, precision=0)
with gr.Row():
with gr.Column(scale=2):
gr.HTML(value="")
with gr.Column():
with gr.Row():
interrupt_training = gr.Button(value="Interrupt")
train_embedding = gr.Button(value="Train", variant='primary')
with gr.Column():
progressbar = gr.HTML(elem_id="ti_progressbar")
ti_output = gr.Text(elem_id="ti_output", value="", show_label=False)
ti_gallery = gr.Gallery(label='Output', show_label=False, elem_id='ti_gallery').style(grid=4)
ti_preview = gr.Image(elem_id='ti_preview', visible=False)
ti_progress = gr.HTML(elem_id="ti_progress", value="")
ti_outcome = gr.HTML(elem_id="ti_error", value="")
setup_progressbar(progressbar, ti_preview, 'ti', textinfo=ti_progress)
create_embedding.click(
fn=modules.textual_inversion.ui.create_embedding,
inputs=[
new_embedding_name,
initialization_text,
nvpt,
],
outputs=[
train_embedding_name,
ti_output,
ti_outcome,
]
)
train_embedding.click(
fn=wrap_gradio_gpu_call(modules.textual_inversion.ui.train_embedding, extra_outputs=[gr.update()]),
_js="start_training_textual_inversion",
inputs=[
train_embedding_name,
learn_rate,
dataset_directory,
log_directory,
steps,
create_image_every,
save_embedding_every,
template_file,
],
outputs=[
ti_output,
ti_outcome,
]
)
interrupt_training.click(
fn=lambda: shared.state.interrupt(),
inputs=[],
outputs=[],
)
def create_setting_component(key):
def fun():
return opts.data[key] if key in opts.data else opts.data_labels[key].default
info = opts.data_labels[key]
t = type(info.default)
args = info.component_args() if callable(info.component_args) else info.component_args
if info.component is not None:
comp = info.component
elif t == str:
comp = gr.Textbox
elif t == int:
comp = gr.Number
elif t == bool:
comp = gr.Checkbox
else:
raise Exception(f'bad options item type: {str(t)} for key {key}')
return comp(label=info.label, value=fun, **(args or {}))
components = []
component_dict = {}
def open_folder(f):
if not shared.cmd_opts.hide_ui_dir_config:
path = os.path.normpath(f)
if platform.system() == "Windows":
os.startfile(path)
elif platform.system() == "Darwin":
sp.Popen(["open", path])
else:
sp.Popen(["xdg-open", path])
def run_settings(*args):
changed = 0
for key, value, comp in zip(opts.data_labels.keys(), args, components):
if not opts.same_type(value, opts.data_labels[key].default):
return f"Bad value for setting {key}: {value}; expecting {type(opts.data_labels[key].default).__name__}"
for key, value, comp in zip(opts.data_labels.keys(), args, components):
comp_args = opts.data_labels[key].component_args
if comp_args and isinstance(comp_args, dict) and comp_args.get('visible') is False:
continue
oldval = opts.data.get(key, None)
opts.data[key] = value
if oldval != value:
if opts.data_labels[key].onchange is not None:
opts.data_labels[key].onchange()
changed += 1
opts.save(shared.config_filename)
return f'{changed} settings changed.', opts.dumpjson()
with gr.Blocks(analytics_enabled=False) as settings_interface:
settings_submit = gr.Button(value="Apply settings", variant='primary')
result = gr.HTML()
settings_cols = 3
items_per_col = int(len(opts.data_labels) * 0.9 / settings_cols)
cols_displayed = 0
items_displayed = 0
previous_section = None
column = None
with gr.Row(elem_id="settings").style(equal_height=False):
for i, (k, item) in enumerate(opts.data_labels.items()):
if previous_section != item.section:
if cols_displayed < settings_cols and (items_displayed >= items_per_col or previous_section is None):
if column is not None:
column.__exit__()
column = gr.Column(variant='panel')
column.__enter__()
items_displayed = 0
cols_displayed += 1
previous_section = item.section
gr.HTML(elem_id="settings_header_text_{}".format(item.section[0]), value='<h1 class="gr-button-lg">{}</h1>'.format(item.section[1]))
component = create_setting_component(k)
component_dict[k] = component
components.append(component)
items_displayed += 1
request_notifications = gr.Button(value='Request browser notifications', elem_id="request_notifications")
request_notifications.click(
fn=lambda: None,
inputs=[],
outputs=[],
_js='function(){}'
)
with gr.Row():
reload_script_bodies = gr.Button(value='Reload custom script bodies (No ui updates, No restart)', variant='secondary')
restart_gradio = gr.Button(value='Restart Gradio and Refresh components (Custom Scripts, ui.py, js and css only)', variant='primary')
def reload_scripts():
modules.scripts.reload_script_body_only()
reload_script_bodies.click(
fn=reload_scripts,
inputs=[],
outputs=[],
_js='function(){}'
)
def request_restart():
settings_interface.gradio_ref.do_restart = True
restart_gradio.click(
fn=request_restart,
inputs=[],
outputs=[],
_js='function(){restart_reload()}'
)
if column is not None:
column.__exit__()
interfaces = [
(txt2img_interface, "txt2img", "txt2img"),
(img2img_interface, "img2img", "img2img"),
(extras_interface, "Extras", "extras"),
(pnginfo_interface, "PNG Info", "pnginfo"),
(modelmerger_interface, "Checkpoint Merger", "modelmerger"),
(textual_inversion_interface, "Textual inversion", "ti"),
(settings_interface, "Settings", "settings"),
]
with open(os.path.join(script_path, "style.css"), "r", encoding="utf8") as file:
css = file.read()
if os.path.exists(os.path.join(script_path, "user.css")):
with open(os.path.join(script_path, "user.css"), "r", encoding="utf8") as file:
usercss = file.read()
css += usercss
if not cmd_opts.no_progressbar_hiding:
css += css_hide_progressbar
with gr.Blocks(css=css, analytics_enabled=False, title="Stable Diffusion") as demo:
settings_interface.gradio_ref = demo
with gr.Tabs() as tabs:
for interface, label, ifid in interfaces:
with gr.TabItem(label, id=ifid):
interface.render()
if os.path.exists(os.path.join(script_path, "notification.mp3")):
audio_notification = gr.Audio(interactive=False, value=os.path.join(script_path, "notification.mp3"), elem_id="audio_notification", visible=False)
text_settings = gr.Textbox(elem_id="settings_json", value=lambda: opts.dumpjson(), visible=False)
settings_submit.click(
fn=run_settings,
inputs=components,
outputs=[result, text_settings],
)
def modelmerger(*args):
try:
results = modules.extras.run_modelmerger(*args)
except Exception as e:
print("Error loading/saving model file:", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
modules.sd_models.list_models() # to remove the potentially missing models from the list
return ["Error loading/saving model file. It doesn't exist or the name contains illegal characters"] + [gr.Dropdown.update(choices=modules.sd_models.checkpoint_tiles()) for _ in range(3)]
return results
modelmerger_merge.click(
fn=modelmerger,
inputs=[
primary_model_name,
secondary_model_name,
interp_method,
interp_amount,
save_as_half,
custom_name,
],
outputs=[
submit_result,
primary_model_name,
secondary_model_name,
component_dict['sd_model_checkpoint'],
]
)
paste_field_names = ['Prompt', 'Negative prompt', 'Steps', 'Face restoration', 'Seed', 'Size-1', 'Size-2']
txt2img_fields = [field for field,name in txt2img_paste_fields if name in paste_field_names]
img2img_fields = [field for field,name in img2img_paste_fields if name in paste_field_names]
send_to_img2img.click(
fn=lambda img, *args: (image_from_url_text(img),*args),
_js="(gallery, ...args) => [extract_image_from_gallery_img2img(gallery), ...args]",
inputs=[txt2img_gallery] + txt2img_fields,
outputs=[init_img] + img2img_fields,
)
send_to_inpaint.click(
fn=lambda x, *args: (image_from_url_text(x), *args),
_js="(gallery, ...args) => [extract_image_from_gallery_inpaint(gallery), ...args]",
inputs=[txt2img_gallery] + txt2img_fields,
outputs=[init_img_with_mask] + img2img_fields,
)
img2img_send_to_img2img.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_img2img",
inputs=[img2img_gallery],
outputs=[init_img],
)
img2img_send_to_inpaint.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_inpaint",
inputs=[img2img_gallery],
outputs=[init_img_with_mask],
)
send_to_extras.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_extras",
inputs=[txt2img_gallery],
outputs=[extras_image],
)
open_txt2img_folder.click(
fn=lambda: open_folder(opts.outdir_samples or opts.outdir_txt2img_samples),
inputs=[],
outputs=[],
)
open_img2img_folder.click(
fn=lambda: open_folder(opts.outdir_samples or opts.outdir_img2img_samples),
inputs=[],
outputs=[],
)
open_extras_folder.click(
fn=lambda: open_folder(opts.outdir_samples or opts.outdir_extras_samples),
inputs=[],
outputs=[],
)
img2img_send_to_extras.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_extras",
inputs=[img2img_gallery],
outputs=[extras_image],
)
modules.generation_parameters_copypaste.connect_paste(pnginfo_send_to_txt2img, txt2img_paste_fields, generation_info, 'switch_to_txt2img')
modules.generation_parameters_copypaste.connect_paste(pnginfo_send_to_img2img, img2img_paste_fields, generation_info, 'switch_to_img2img_img2img')
ui_config_file = cmd_opts.ui_config_file
ui_settings = {}
settings_count = len(ui_settings)
error_loading = False
try:
if os.path.exists(ui_config_file):
with open(ui_config_file, "r", encoding="utf8") as file:
ui_settings = json.load(file)
except Exception:
error_loading = True
print("Error loading settings:", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
def loadsave(path, x):
def apply_field(obj, field, condition=None):
key = path + "/" + field
if getattr(obj,'custom_script_source',None) is not None:
key = 'customscript/' + obj.custom_script_source + '/' + key
if getattr(obj, 'do_not_save_to_config', False):
return
saved_value = ui_settings.get(key, None)
if saved_value is None:
ui_settings[key] = getattr(obj, field)
elif condition is None or condition(saved_value):
setattr(obj, field, saved_value)
if type(x) in [gr.Slider, gr.Radio, gr.Checkbox, gr.Textbox, gr.Number] and x.visible:
apply_field(x, 'visible')
if type(x) == gr.Slider:
apply_field(x, 'value')
apply_field(x, 'minimum')
apply_field(x, 'maximum')
apply_field(x, 'step')
if type(x) == gr.Radio:
apply_field(x, 'value', lambda val: val in x.choices)
if type(x) == gr.Checkbox:
apply_field(x, 'value')
if type(x) == gr.Textbox:
apply_field(x, 'value')
if type(x) == gr.Number:
apply_field(x, 'value')
visit(txt2img_interface, loadsave, "txt2img")
visit(img2img_interface, loadsave, "img2img")
visit(extras_interface, loadsave, "extras")
if not error_loading and (not os.path.exists(ui_config_file) or settings_count != len(ui_settings)):
with open(ui_config_file, "w", encoding="utf8") as file:
json.dump(ui_settings, file, indent=4)
return demo
with open(os.path.join(script_path, "script.js"), "r", encoding="utf8") as jsfile:
javascript = f'<script>{jsfile.read()}</script>'
jsdir = os.path.join(script_path, "javascript")
for filename in sorted(os.listdir(jsdir)):
with open(os.path.join(jsdir, filename), "r", encoding="utf8") as jsfile:
javascript += f"\n<script>{jsfile.read()}</script>"
if 'gradio_routes_templates_response' not in globals():
def template_response(*args, **kwargs):
res = gradio_routes_templates_response(*args, **kwargs)
res.body = res.body.replace(b'</head>', f'{javascript}</head>'.encode("utf8"))
res.init_headers()
return res
gradio_routes_templates_response = gradio.routes.templates.TemplateResponse
gradio.routes.templates.TemplateResponse = template_response
<|code_end|>
webui.py
<|code_start|>import os
import threading
import time
import importlib
from modules import devices
from modules.paths import script_path
import signal
import threading
import modules.codeformer_model as codeformer
import modules.extras
import modules.face_restoration
import modules.gfpgan_model as gfpgan
import modules.img2img
import modules.lowvram
import modules.paths
import modules.scripts
import modules.sd_hijack
import modules.sd_models
import modules.shared as shared
import modules.txt2img
import modules.ui
from modules import devices
from modules import modelloader
from modules.paths import script_path
from modules.shared import cmd_opts
modelloader.cleanup_models()
modules.sd_models.setup_model()
codeformer.setup_model(cmd_opts.codeformer_models_path)
gfpgan.setup_model(cmd_opts.gfpgan_models_path)
shared.face_restorers.append(modules.face_restoration.FaceRestoration())
modelloader.load_upscalers()
queue_lock = threading.Lock()
def wrap_queued_call(func):
def f(*args, **kwargs):
with queue_lock:
res = func(*args, **kwargs)
return res
return f
def wrap_gradio_gpu_call(func, extra_outputs=None):
def f(*args, **kwargs):
devices.torch_gc()
shared.state.sampling_step = 0
shared.state.job_count = -1
shared.state.job_no = 0
shared.state.job_timestamp = shared.state.get_job_timestamp()
shared.state.current_latent = None
shared.state.current_image = None
shared.state.current_image_sampling_step = 0
shared.state.interrupted = False
shared.state.textinfo = None
with queue_lock:
res = func(*args, **kwargs)
shared.state.job = ""
shared.state.job_count = 0
devices.torch_gc()
return res
return modules.ui.wrap_gradio_call(f, extra_outputs=extra_outputs)
modules.scripts.load_scripts(os.path.join(script_path, "scripts"))
shared.sd_model = modules.sd_models.load_model()
shared.opts.onchange("sd_model_checkpoint", wrap_queued_call(lambda: modules.sd_models.reload_model_weights(shared.sd_model)))
def webui():
# make the program just exit at ctrl+c without waiting for anything
def sigint_handler(sig, frame):
print(f'Interrupted with signal {sig} in {frame}')
os._exit(0)
signal.signal(signal.SIGINT, sigint_handler)
while 1:
demo = modules.ui.create_ui(wrap_gradio_gpu_call=wrap_gradio_gpu_call)
demo.launch(
share=cmd_opts.share,
server_name="0.0.0.0" if cmd_opts.listen else None,
server_port=cmd_opts.port,
debug=cmd_opts.gradio_debug,
auth=[tuple(cred.split(':')) for cred in cmd_opts.gradio_auth.strip('"').split(',')] if cmd_opts.gradio_auth else None,
inbrowser=cmd_opts.autolaunch,
prevent_thread_lock=True
)
while 1:
time.sleep(0.5)
if getattr(demo,'do_restart',False):
time.sleep(0.5)
demo.close()
time.sleep(0.5)
break
print('Reloading Custom Scripts')
modules.scripts.reload_scripts(os.path.join(script_path, "scripts"))
print('Reloading modules: modules.ui')
importlib.reload(modules.ui)
print('Restarting Gradio')
if __name__ == "__main__":
webui()
<|code_end|>
|
Display time taken in minutes when over 60 seconds
**Is your feature request related to a problem? Please describe.**
I am always investigating how my parameters affect the run time of a generation so I can plan things better. If I'm stepping away from the computer for 15 minutes, it's useful to understand how many steps and what batch count I can run in that time frame.
**Describe the solution you'd like**
Display minutes when over 60s, e.g. `Time taken: 3m 35s` instead of `Time taken: 215.08s`
**Describe alternatives you've considered**
**Additional context**
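One possible shape for the formatting, as a minimal sketch only — `format_elapsed` is a hypothetical helper used for illustration and is not part of the existing code:

```python
# Hypothetical helper, not part of modules/ui.py -- just to illustrate the requested format.
def format_elapsed(elapsed: float) -> str:
    minutes, seconds = divmod(int(elapsed), 60)
    if minutes > 0:
        return f"{minutes}m {seconds}s"
    return f"{elapsed:.2f}s"

print(format_elapsed(215.08))  # -> "3m 35s"
print(format_elapsed(42.5))    # -> "42.50s"
```

Keeping fractional seconds for short runs preserves the current precision, while switching to whole seconds once minutes are shown keeps the display compact.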
|
modules/ui.py
<|code_start|>import base64
import html
import io
import json
import math
import mimetypes
import os
import random
import sys
import time
import traceback
import platform
import subprocess as sp
from functools import reduce
import numpy as np
import torch
from PIL import Image, PngImagePlugin
import piexif
import gradio as gr
import gradio.utils
import gradio.routes
from modules import sd_hijack
from modules.paths import script_path
from modules.shared import opts, cmd_opts
import modules.shared as shared
from modules.sd_samplers import samplers, samplers_for_img2img
from modules.sd_hijack import model_hijack
import modules.ldsr_model
import modules.scripts
import modules.gfpgan_model
import modules.codeformer_model
import modules.styles
import modules.generation_parameters_copypaste
from modules.prompt_parser import get_learned_conditioning_prompt_schedules
from modules.images import apply_filename_pattern, get_next_sequence_number
import modules.textual_inversion.ui
# this is a fix for Windows users. Without it, JavaScript files will be served with text/html content-type and the browser will not show any UI
mimetypes.init()
mimetypes.add_type('application/javascript', '.js')
if not cmd_opts.share and not cmd_opts.listen:
# fix gradio phoning home
gradio.utils.version_check = lambda: None
gradio.utils.get_local_ip_address = lambda: '127.0.0.1'
def gr_show(visible=True):
return {"visible": visible, "__type__": "update"}
sample_img2img = "assets/stable-samples/img2img/sketch-mountains-input.jpg"
sample_img2img = sample_img2img if os.path.exists(sample_img2img) else None
css_hide_progressbar = """
.wrap .m-12 svg { display:none!important; }
.wrap .m-12::before { content:"Loading..." }
.progress-bar { display:none!important; }
.meta-text { display:none!important; }
"""
# Using constants for these since the variation selector isn't visible.
# Important that they exactly match script.js for tooltip to work.
random_symbol = '\U0001f3b2\ufe0f' # 🎲️
reuse_symbol = '\u267b\ufe0f' # ♻️
art_symbol = '\U0001f3a8' # 🎨
paste_symbol = '\u2199\ufe0f' # ↙
folder_symbol = '\U0001f4c2' # 📂
def plaintext_to_html(text):
text = "<p>" + "<br>\n".join([f"{html.escape(x)}" for x in text.split('\n')]) + "</p>"
return text
def image_from_url_text(filedata):
if type(filedata) == list:
if len(filedata) == 0:
return None
filedata = filedata[0]
if filedata.startswith("data:image/png;base64,"):
filedata = filedata[len("data:image/png;base64,"):]
filedata = base64.decodebytes(filedata.encode('utf-8'))
image = Image.open(io.BytesIO(filedata))
return image
def send_gradio_gallery_to_image(x):
if len(x) == 0:
return None
return image_from_url_text(x[0])
def save_files(js_data, images, index):
import csv
filenames = []
    # quick dictionary-to-class-object conversion; it's necessary because apply_filename_pattern requires it
class MyObject:
def __init__(self, d=None):
if d is not None:
for key, value in d.items():
setattr(self, key, value)
data = json.loads(js_data)
p = MyObject(data)
path = opts.outdir_save
save_to_dirs = opts.use_save_to_dirs_for_ui
if save_to_dirs:
dirname = apply_filename_pattern(opts.directories_filename_pattern or "[prompt_words]", p, p.seed, p.prompt)
path = os.path.join(opts.outdir_save, dirname)
os.makedirs(path, exist_ok=True)
if index > -1 and opts.save_selected_only and (index >= data["index_of_first_image"]): # ensures we are looking at a specific non-grid picture, and we have save_selected_only
images = [images[index]]
infotexts = [data["infotexts"][index]]
else:
infotexts = data["infotexts"]
with open(os.path.join(opts.outdir_save, "log.csv"), "a", encoding="utf8", newline='') as file:
at_start = file.tell() == 0
writer = csv.writer(file)
if at_start:
writer.writerow(["prompt", "seed", "width", "height", "sampler", "cfgs", "steps", "filename", "negative_prompt"])
file_decoration = opts.samples_filename_pattern or "[seed]-[prompt_spaces]"
if file_decoration != "":
file_decoration = "-" + file_decoration.lower()
file_decoration = apply_filename_pattern(file_decoration, p, p.seed, p.prompt)
truncated = (file_decoration[:240] + '..') if len(file_decoration) > 240 else file_decoration
filename_base = truncated
extension = opts.samples_format.lower()
basecount = get_next_sequence_number(path, "")
for i, filedata in enumerate(images):
file_number = f"{basecount+i:05}"
filename = file_number + filename_base + f".{extension}"
filepath = os.path.join(path, filename)
if filedata.startswith("data:image/png;base64,"):
filedata = filedata[len("data:image/png;base64,"):]
image = Image.open(io.BytesIO(base64.decodebytes(filedata.encode('utf-8'))))
if opts.enable_pnginfo and extension == 'png':
pnginfo = PngImagePlugin.PngInfo()
pnginfo.add_text('parameters', infotexts[i])
image.save(filepath, pnginfo=pnginfo)
else:
image.save(filepath, quality=opts.jpeg_quality)
if opts.enable_pnginfo and extension in ("jpg", "jpeg", "webp"):
piexif.insert(piexif.dump({"Exif": {
piexif.ExifIFD.UserComment: piexif.helper.UserComment.dump(infotexts[i], encoding="unicode")
}}), filepath)
filenames.append(filename)
writer.writerow([data["prompt"], data["seed"], data["width"], data["height"], data["sampler"], data["cfg_scale"], data["steps"], filenames[0], data["negative_prompt"]])
return '', '', plaintext_to_html(f"Saved: {filenames[0]}")
def wrap_gradio_call(func, extra_outputs=None):
def f(*args, extra_outputs_array=extra_outputs, **kwargs):
run_memmon = opts.memmon_poll_rate > 0 and not shared.mem_mon.disabled
if run_memmon:
shared.mem_mon.monitor()
t = time.perf_counter()
try:
res = list(func(*args, **kwargs))
except Exception as e:
print("Error completing request", file=sys.stderr)
print("Arguments:", args, kwargs, file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
shared.state.job = ""
shared.state.job_count = 0
if extra_outputs_array is None:
extra_outputs_array = [None, '']
res = extra_outputs_array + [f"<div class='error'>{plaintext_to_html(type(e).__name__+': '+str(e))}</div>"]
elapsed = time.perf_counter() - t
if run_memmon:
mem_stats = {k: -(v//-(1024*1024)) for k, v in shared.mem_mon.stop().items()}
active_peak = mem_stats['active_peak']
reserved_peak = mem_stats['reserved_peak']
sys_peak = mem_stats['system_peak']
sys_total = mem_stats['total']
sys_pct = round(sys_peak/max(sys_total, 1) * 100, 2)
vram_html = f"<p class='vram'>Torch active/reserved: {active_peak}/{reserved_peak} MiB, <wbr>Sys VRAM: {sys_peak}/{sys_total} MiB ({sys_pct}%)</p>"
else:
vram_html = ''
# last item is always HTML
res[-1] += f"<div class='performance'><p class='time'>Time taken: <wbr>{elapsed:.2f}s</p>{vram_html}</div>"
shared.state.interrupted = False
shared.state.job_count = 0
return tuple(res)
return f
def check_progress_call(id_part):
if shared.state.job_count == 0:
return "", gr_show(False), gr_show(False), gr_show(False)
progress = 0
if shared.state.job_count > 0:
progress += shared.state.job_no / shared.state.job_count
if shared.state.sampling_steps > 0:
progress += 1 / shared.state.job_count * shared.state.sampling_step / shared.state.sampling_steps
progress = min(progress, 1)
progressbar = ""
if opts.show_progressbar:
progressbar = f"""<div class='progressDiv'><div class='progress' style="width:{progress * 100}%">{str(int(progress*100))+"%" if progress > 0.01 else ""}</div></div>"""
image = gr_show(False)
preview_visibility = gr_show(False)
if opts.show_progress_every_n_steps > 0:
if shared.parallel_processing_allowed:
if shared.state.sampling_step - shared.state.current_image_sampling_step >= opts.show_progress_every_n_steps and shared.state.current_latent is not None:
shared.state.current_image = modules.sd_samplers.sample_to_image(shared.state.current_latent)
shared.state.current_image_sampling_step = shared.state.sampling_step
image = shared.state.current_image
if image is None:
image = gr.update(value=None)
else:
preview_visibility = gr_show(True)
if shared.state.textinfo is not None:
textinfo_result = gr.HTML.update(value=shared.state.textinfo, visible=True)
else:
textinfo_result = gr_show(False)
return f"<span id='{id_part}_progress_span' style='display: none'>{time.time()}</span><p>{progressbar}</p>", preview_visibility, image, textinfo_result
def check_progress_call_initial(id_part):
shared.state.job_count = -1
shared.state.current_latent = None
shared.state.current_image = None
shared.state.textinfo = None
return check_progress_call(id_part)
def roll_artist(prompt):
allowed_cats = set([x for x in shared.artist_db.categories() if len(opts.random_artist_categories)==0 or x in opts.random_artist_categories])
artist = random.choice([x for x in shared.artist_db.artists if x.category in allowed_cats])
return prompt + ", " + artist.name if prompt != '' else artist.name
def visit(x, func, path=""):
if hasattr(x, 'children'):
for c in x.children:
visit(c, func, path)
elif x.label is not None:
func(path + "/" + str(x.label), x)
def add_style(name: str, prompt: str, negative_prompt: str):
if name is None:
return [gr_show(), gr_show()]
style = modules.styles.PromptStyle(name, prompt, negative_prompt)
shared.prompt_styles.styles[style.name] = style
# Save all loaded prompt styles: this allows us to update the storage format in the future more easily, because we
# reserialize all styles every time we save them
shared.prompt_styles.save_styles(shared.styles_filename)
return [gr.Dropdown.update(visible=True, choices=list(shared.prompt_styles.styles)) for _ in range(4)]
def apply_styles(prompt, prompt_neg, style1_name, style2_name):
prompt = shared.prompt_styles.apply_styles_to_prompt(prompt, [style1_name, style2_name])
prompt_neg = shared.prompt_styles.apply_negative_styles_to_prompt(prompt_neg, [style1_name, style2_name])
return [gr.Textbox.update(value=prompt), gr.Textbox.update(value=prompt_neg), gr.Dropdown.update(value="None"), gr.Dropdown.update(value="None")]
def interrogate(image):
prompt = shared.interrogator.interrogate(image)
return gr_show(True) if prompt is None else prompt
def create_seed_inputs():
with gr.Row():
with gr.Box():
with gr.Row(elem_id='seed_row'):
seed = (gr.Textbox if cmd_opts.use_textbox_seed else gr.Number)(label='Seed', value=-1)
seed.style(container=False)
random_seed = gr.Button(random_symbol, elem_id='random_seed')
reuse_seed = gr.Button(reuse_symbol, elem_id='reuse_seed')
with gr.Box(elem_id='subseed_show_box'):
seed_checkbox = gr.Checkbox(label='Extra', elem_id='subseed_show', value=False)
# Components to show/hide based on the 'Extra' checkbox
seed_extras = []
with gr.Row(visible=False) as seed_extra_row_1:
seed_extras.append(seed_extra_row_1)
with gr.Box():
with gr.Row(elem_id='subseed_row'):
subseed = gr.Number(label='Variation seed', value=-1)
subseed.style(container=False)
random_subseed = gr.Button(random_symbol, elem_id='random_subseed')
reuse_subseed = gr.Button(reuse_symbol, elem_id='reuse_subseed')
subseed_strength = gr.Slider(label='Variation strength', value=0.0, minimum=0, maximum=1, step=0.01)
with gr.Row(visible=False) as seed_extra_row_2:
seed_extras.append(seed_extra_row_2)
seed_resize_from_w = gr.Slider(minimum=0, maximum=2048, step=64, label="Resize seed from width", value=0)
seed_resize_from_h = gr.Slider(minimum=0, maximum=2048, step=64, label="Resize seed from height", value=0)
random_seed.click(fn=lambda: -1, show_progress=False, inputs=[], outputs=[seed])
random_subseed.click(fn=lambda: -1, show_progress=False, inputs=[], outputs=[subseed])
def change_visibility(show):
return {comp: gr_show(show) for comp in seed_extras}
seed_checkbox.change(change_visibility, show_progress=False, inputs=[seed_checkbox], outputs=seed_extras)
return seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox
def connect_reuse_seed(seed: gr.Number, reuse_seed: gr.Button, generation_info: gr.Textbox, dummy_component, is_subseed):
""" Connects a 'reuse (sub)seed' button's click event so that it copies last used
    (sub)seed value from generation info to the seed field. If copying subseed and subseed strength
was 0, i.e. no variation seed was used, it copies the normal seed value instead."""
def copy_seed(gen_info_string: str, index):
res = -1
try:
gen_info = json.loads(gen_info_string)
index -= gen_info.get('index_of_first_image', 0)
if is_subseed and gen_info.get('subseed_strength', 0) > 0:
all_subseeds = gen_info.get('all_subseeds', [-1])
res = all_subseeds[index if 0 <= index < len(all_subseeds) else 0]
else:
all_seeds = gen_info.get('all_seeds', [-1])
res = all_seeds[index if 0 <= index < len(all_seeds) else 0]
except json.decoder.JSONDecodeError as e:
if gen_info_string != '':
print("Error parsing JSON generation info:", file=sys.stderr)
print(gen_info_string, file=sys.stderr)
return [res, gr_show(False)]
reuse_seed.click(
fn=copy_seed,
_js="(x, y) => [x, selected_gallery_index()]",
show_progress=False,
inputs=[generation_info, dummy_component],
outputs=[seed, dummy_component]
)
def update_token_counter(text, steps):
try:
prompt_schedules = get_learned_conditioning_prompt_schedules([text], steps)
except Exception:
# a parsing error can happen here during typing, and we don't want to bother the user with
# messages related to it in console
prompt_schedules = [[[steps, text]]]
flat_prompts = reduce(lambda list1, list2: list1+list2, prompt_schedules)
prompts = [prompt_text for step, prompt_text in flat_prompts]
tokens, token_count, max_length = max([model_hijack.tokenize(prompt) for prompt in prompts], key=lambda args: args[1])
style_class = ' class="red"' if (token_count > max_length) else ""
return f"<span {style_class}>{token_count}/{max_length}</span>"
def create_toprow(is_img2img):
id_part = "img2img" if is_img2img else "txt2img"
with gr.Row(elem_id="toprow"):
with gr.Column(scale=4):
with gr.Row():
with gr.Column(scale=80):
with gr.Row():
prompt = gr.Textbox(label="Prompt", elem_id=f"{id_part}_prompt", show_label=False, placeholder="Prompt", lines=2)
with gr.Column(scale=1, elem_id="roll_col"):
roll = gr.Button(value=art_symbol, elem_id="roll", visible=len(shared.artist_db.artists) > 0)
paste = gr.Button(value=paste_symbol, elem_id="paste")
token_counter = gr.HTML(value="<span></span>", elem_id=f"{id_part}_token_counter")
token_button = gr.Button(visible=False, elem_id=f"{id_part}_token_button")
with gr.Column(scale=10, elem_id="style_pos_col"):
prompt_style = gr.Dropdown(label="Style 1", elem_id=f"{id_part}_style_index", choices=[k for k, v in shared.prompt_styles.styles.items()], value=next(iter(shared.prompt_styles.styles.keys())), visible=len(shared.prompt_styles.styles) > 1)
with gr.Row():
with gr.Column(scale=8):
negative_prompt = gr.Textbox(label="Negative prompt", elem_id="negative_prompt", show_label=False, placeholder="Negative prompt", lines=2)
with gr.Column(scale=1, elem_id="style_neg_col"):
prompt_style2 = gr.Dropdown(label="Style 2", elem_id=f"{id_part}_style2_index", choices=[k for k, v in shared.prompt_styles.styles.items()], value=next(iter(shared.prompt_styles.styles.keys())), visible=len(shared.prompt_styles.styles) > 1)
with gr.Column(scale=1):
with gr.Row():
interrupt = gr.Button('Interrupt', elem_id=f"{id_part}_interrupt")
submit = gr.Button('Generate', elem_id=f"{id_part}_generate", variant='primary')
interrupt.click(
fn=lambda: shared.state.interrupt(),
inputs=[],
outputs=[],
)
with gr.Row():
if is_img2img:
interrogate = gr.Button('Interrogate', elem_id="interrogate")
else:
interrogate = None
prompt_style_apply = gr.Button('Apply style', elem_id="style_apply")
save_style = gr.Button('Create style', elem_id="style_create")
return prompt, roll, prompt_style, negative_prompt, prompt_style2, submit, interrogate, prompt_style_apply, save_style, paste, token_counter, token_button
def setup_progressbar(progressbar, preview, id_part, textinfo=None):
if textinfo is None:
textinfo = gr.HTML(visible=False)
check_progress = gr.Button('Check progress', elem_id=f"{id_part}_check_progress", visible=False)
check_progress.click(
fn=lambda: check_progress_call(id_part),
show_progress=False,
inputs=[],
outputs=[progressbar, preview, preview, textinfo],
)
check_progress_initial = gr.Button('Check progress (first)', elem_id=f"{id_part}_check_progress_initial", visible=False)
check_progress_initial.click(
fn=lambda: check_progress_call_initial(id_part),
show_progress=False,
inputs=[],
outputs=[progressbar, preview, preview, textinfo],
)
def create_ui(wrap_gradio_gpu_call):
import modules.img2img
import modules.txt2img
with gr.Blocks(analytics_enabled=False) as txt2img_interface:
txt2img_prompt, roll, txt2img_prompt_style, txt2img_negative_prompt, txt2img_prompt_style2, submit, _, txt2img_prompt_style_apply, txt2img_save_style, paste, token_counter, token_button = create_toprow(is_img2img=False)
dummy_component = gr.Label(visible=False)
with gr.Row(elem_id='txt2img_progress_row'):
with gr.Column(scale=1):
pass
with gr.Column(scale=1):
progressbar = gr.HTML(elem_id="txt2img_progressbar")
txt2img_preview = gr.Image(elem_id='txt2img_preview', visible=False)
setup_progressbar(progressbar, txt2img_preview, 'txt2img')
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
steps = gr.Slider(minimum=1, maximum=150, step=1, label="Sampling Steps", value=20)
sampler_index = gr.Radio(label='Sampling method', elem_id="txt2img_sampling", choices=[x.name for x in samplers], value=samplers[0].name, type="index")
with gr.Group():
width = gr.Slider(minimum=64, maximum=2048, step=64, label="Width", value=512)
height = gr.Slider(minimum=64, maximum=2048, step=64, label="Height", value=512)
with gr.Row():
restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1)
tiling = gr.Checkbox(label='Tiling', value=False)
enable_hr = gr.Checkbox(label='Highres. fix', value=False)
with gr.Row(visible=False) as hr_options:
scale_latent = gr.Checkbox(label='Scale latent', value=False)
denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7)
with gr.Row():
batch_count = gr.Slider(minimum=1, maximum=cmd_opts.max_batch_count, step=1, label='Batch count', value=1)
batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1)
cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0)
seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs()
with gr.Group():
custom_inputs = modules.scripts.scripts_txt2img.setup_ui(is_img2img=False)
with gr.Column(variant='panel'):
with gr.Group():
txt2img_preview = gr.Image(elem_id='txt2img_preview', visible=False)
txt2img_gallery = gr.Gallery(label='Output', show_label=False, elem_id='txt2img_gallery').style(grid=4)
with gr.Group():
with gr.Row():
save = gr.Button('Save')
send_to_img2img = gr.Button('Send to img2img')
send_to_inpaint = gr.Button('Send to inpaint')
send_to_extras = gr.Button('Send to extras')
button_id = "hidden_element" if shared.cmd_opts.hide_ui_dir_config else 'open_folder'
open_txt2img_folder = gr.Button(folder_symbol, elem_id=button_id)
with gr.Group():
html_info = gr.HTML()
generation_info = gr.Textbox(visible=False)
connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False)
connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True)
txt2img_args = dict(
fn=wrap_gradio_gpu_call(modules.txt2img.txt2img),
_js="submit",
inputs=[
txt2img_prompt,
txt2img_negative_prompt,
txt2img_prompt_style,
txt2img_prompt_style2,
steps,
sampler_index,
restore_faces,
tiling,
batch_count,
batch_size,
cfg_scale,
seed,
subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox,
height,
width,
enable_hr,
scale_latent,
denoising_strength,
] + custom_inputs,
outputs=[
txt2img_gallery,
generation_info,
html_info
],
show_progress=False,
)
txt2img_prompt.submit(**txt2img_args)
submit.click(**txt2img_args)
enable_hr.change(
fn=lambda x: gr_show(x),
inputs=[enable_hr],
outputs=[hr_options],
)
save.click(
fn=wrap_gradio_call(save_files),
_js="(x, y, z) => [x, y, selected_gallery_index()]",
inputs=[
generation_info,
txt2img_gallery,
html_info,
],
outputs=[
html_info,
html_info,
html_info,
]
)
roll.click(
fn=roll_artist,
_js="update_txt2img_tokens",
inputs=[
txt2img_prompt,
],
outputs=[
txt2img_prompt,
]
)
txt2img_paste_fields = [
(txt2img_prompt, "Prompt"),
(txt2img_negative_prompt, "Negative prompt"),
(steps, "Steps"),
(sampler_index, "Sampler"),
(restore_faces, "Face restoration"),
(cfg_scale, "CFG scale"),
(seed, "Seed"),
(width, "Size-1"),
(height, "Size-2"),
(batch_size, "Batch size"),
(subseed, "Variation seed"),
(subseed_strength, "Variation seed strength"),
(seed_resize_from_w, "Seed resize from-1"),
(seed_resize_from_h, "Seed resize from-2"),
(denoising_strength, "Denoising strength"),
(enable_hr, lambda d: "Denoising strength" in d),
(hr_options, lambda d: gr.Row.update(visible="Denoising strength" in d)),
]
modules.generation_parameters_copypaste.connect_paste(paste, txt2img_paste_fields, txt2img_prompt)
token_button.click(fn=update_token_counter, inputs=[txt2img_prompt, steps], outputs=[token_counter])
with gr.Blocks(analytics_enabled=False) as img2img_interface:
img2img_prompt, roll, img2img_prompt_style, img2img_negative_prompt, img2img_prompt_style2, submit, img2img_interrogate, img2img_prompt_style_apply, img2img_save_style, paste, token_counter, token_button = create_toprow(is_img2img=True)
with gr.Row(elem_id='img2img_progress_row'):
with gr.Column(scale=1):
pass
with gr.Column(scale=1):
progressbar = gr.HTML(elem_id="img2img_progressbar")
img2img_preview = gr.Image(elem_id='img2img_preview', visible=False)
setup_progressbar(progressbar, img2img_preview, 'img2img')
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
with gr.Tabs(elem_id="mode_img2img") as tabs_img2img_mode:
with gr.TabItem('img2img', id='img2img'):
init_img = gr.Image(label="Image for img2img", elem_id="img2img_image", show_label=False, source="upload", interactive=True, type="pil", tool=cmd_opts.gradio_img2img_tool)
with gr.TabItem('Inpaint', id='inpaint'):
init_img_with_mask = gr.Image(label="Image for inpainting with mask", show_label=False, elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool="sketch", image_mode="RGBA")
init_img_inpaint = gr.Image(label="Image for img2img", show_label=False, source="upload", interactive=True, type="pil", visible=False, elem_id="img_inpaint_base")
init_mask_inpaint = gr.Image(label="Mask", source="upload", interactive=True, type="pil", visible=False, elem_id="img_inpaint_mask")
mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4)
with gr.Row():
mask_mode = gr.Radio(label="Mask mode", show_label=False, choices=["Draw mask", "Upload mask"], type="index", value="Draw mask", elem_id="mask_mode")
inpainting_mask_invert = gr.Radio(label='Masking mode', show_label=False, choices=['Inpaint masked', 'Inpaint not masked'], value='Inpaint masked', type="index")
inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='original', type="index")
with gr.Row():
inpaint_full_res = gr.Checkbox(label='Inpaint at full resolution', value=False)
inpaint_full_res_padding = gr.Slider(label='Inpaint at full resolution padding, pixels', minimum=0, maximum=256, step=4, value=32)
with gr.TabItem('Batch img2img', id='batch'):
hidden = '<br>Disabled when launched with --hide-ui-dir-config.' if shared.cmd_opts.hide_ui_dir_config else ''
gr.HTML(f"<p class=\"text-gray-500\">Process images in a directory on the same machine where the server is running.<br>Use an empty output directory to save pictures normally instead of writing to the output directory.{hidden}</p>")
img2img_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs)
img2img_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs)
with gr.Row():
resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", show_label=False, choices=["Just resize", "Crop and resize", "Resize and fill"], type="index", value="Just resize")
steps = gr.Slider(minimum=1, maximum=150, step=1, label="Sampling Steps", value=20)
sampler_index = gr.Radio(label='Sampling method', choices=[x.name for x in samplers_for_img2img], value=samplers_for_img2img[0].name, type="index")
with gr.Group():
width = gr.Slider(minimum=64, maximum=2048, step=64, label="Width", value=512)
height = gr.Slider(minimum=64, maximum=2048, step=64, label="Height", value=512)
with gr.Row():
restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1)
tiling = gr.Checkbox(label='Tiling', value=False)
with gr.Row():
batch_count = gr.Slider(minimum=1, maximum=cmd_opts.max_batch_count, step=1, label='Batch count', value=1)
batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1)
with gr.Group():
cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0)
denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75)
seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs()
with gr.Group():
custom_inputs = modules.scripts.scripts_img2img.setup_ui(is_img2img=True)
with gr.Column(variant='panel'):
with gr.Group():
img2img_preview = gr.Image(elem_id='img2img_preview', visible=False)
img2img_gallery = gr.Gallery(label='Output', show_label=False, elem_id='img2img_gallery').style(grid=4)
with gr.Group():
with gr.Row():
save = gr.Button('Save')
img2img_send_to_img2img = gr.Button('Send to img2img')
img2img_send_to_inpaint = gr.Button('Send to inpaint')
img2img_send_to_extras = gr.Button('Send to extras')
button_id = "hidden_element" if shared.cmd_opts.hide_ui_dir_config else 'open_folder'
open_img2img_folder = gr.Button(folder_symbol, elem_id=button_id)
with gr.Group():
html_info = gr.HTML()
generation_info = gr.Textbox(visible=False)
connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False)
connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True)
mask_mode.change(
lambda mode, img: {
init_img_with_mask: gr_show(mode == 0),
init_img_inpaint: gr_show(mode == 1),
init_mask_inpaint: gr_show(mode == 1),
},
inputs=[mask_mode, init_img_with_mask],
outputs=[
init_img_with_mask,
init_img_inpaint,
init_mask_inpaint,
],
)
img2img_args = dict(
fn=wrap_gradio_gpu_call(modules.img2img.img2img),
_js="submit_img2img",
inputs=[
dummy_component,
img2img_prompt,
img2img_negative_prompt,
img2img_prompt_style,
img2img_prompt_style2,
init_img,
init_img_with_mask,
init_img_inpaint,
init_mask_inpaint,
mask_mode,
steps,
sampler_index,
mask_blur,
inpainting_fill,
restore_faces,
tiling,
batch_count,
batch_size,
cfg_scale,
denoising_strength,
seed,
subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox,
height,
width,
resize_mode,
inpaint_full_res,
inpaint_full_res_padding,
inpainting_mask_invert,
img2img_batch_input_dir,
img2img_batch_output_dir,
] + custom_inputs,
outputs=[
img2img_gallery,
generation_info,
html_info
],
show_progress=False,
)
img2img_prompt.submit(**img2img_args)
submit.click(**img2img_args)
img2img_interrogate.click(
fn=interrogate,
inputs=[init_img],
outputs=[img2img_prompt],
)
save.click(
fn=wrap_gradio_call(save_files),
_js="(x, y, z) => [x, y, selected_gallery_index()]",
inputs=[
generation_info,
img2img_gallery,
html_info
],
outputs=[
html_info,
html_info,
html_info,
]
)
roll.click(
fn=roll_artist,
_js="update_img2img_tokens",
inputs=[
img2img_prompt,
],
outputs=[
img2img_prompt,
]
)
prompts = [(txt2img_prompt, txt2img_negative_prompt), (img2img_prompt, img2img_negative_prompt)]
style_dropdowns = [(txt2img_prompt_style, txt2img_prompt_style2), (img2img_prompt_style, img2img_prompt_style2)]
style_js_funcs = ["update_txt2img_tokens", "update_img2img_tokens"]
for button, (prompt, negative_prompt) in zip([txt2img_save_style, img2img_save_style], prompts):
button.click(
fn=add_style,
_js="ask_for_style_name",
# Have to pass empty dummy component here, because the JavaScript and Python function have to accept
# the same number of parameters, but we only know the style-name after the JavaScript prompt
inputs=[dummy_component, prompt, negative_prompt],
outputs=[txt2img_prompt_style, img2img_prompt_style, txt2img_prompt_style2, img2img_prompt_style2],
)
for button, (prompt, negative_prompt), (style1, style2), js_func in zip([txt2img_prompt_style_apply, img2img_prompt_style_apply], prompts, style_dropdowns, style_js_funcs):
button.click(
fn=apply_styles,
_js=js_func,
inputs=[prompt, negative_prompt, style1, style2],
outputs=[prompt, negative_prompt, style1, style2],
)
img2img_paste_fields = [
(img2img_prompt, "Prompt"),
(img2img_negative_prompt, "Negative prompt"),
(steps, "Steps"),
(sampler_index, "Sampler"),
(restore_faces, "Face restoration"),
(cfg_scale, "CFG scale"),
(seed, "Seed"),
(width, "Size-1"),
(height, "Size-2"),
(batch_size, "Batch size"),
(subseed, "Variation seed"),
(subseed_strength, "Variation seed strength"),
(seed_resize_from_w, "Seed resize from-1"),
(seed_resize_from_h, "Seed resize from-2"),
(denoising_strength, "Denoising strength"),
]
modules.generation_parameters_copypaste.connect_paste(paste, img2img_paste_fields, img2img_prompt)
token_button.click(fn=update_token_counter, inputs=[img2img_prompt, steps], outputs=[token_counter])
with gr.Blocks(analytics_enabled=False) as extras_interface:
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
with gr.Tabs(elem_id="mode_extras"):
with gr.TabItem('Single Image'):
extras_image = gr.Image(label="Source", source="upload", interactive=True, type="pil")
with gr.TabItem('Batch Process'):
image_batch = gr.File(label="Batch Process", file_count="multiple", interactive=True, type="file")
upscaling_resize = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Resize", value=2)
with gr.Group():
extras_upscaler_1 = gr.Radio(label='Upscaler 1', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index")
with gr.Group():
extras_upscaler_2 = gr.Radio(label='Upscaler 2', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index")
extras_upscaler_2_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="Upscaler 2 visibility", value=1)
with gr.Group():
gfpgan_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="GFPGAN visibility", value=0, interactive=modules.gfpgan_model.have_gfpgan)
with gr.Group():
codeformer_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="CodeFormer visibility", value=0, interactive=modules.codeformer_model.have_codeformer)
codeformer_weight = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="CodeFormer weight (0 = maximum effect, 1 = minimum effect)", value=0, interactive=modules.codeformer_model.have_codeformer)
submit = gr.Button('Generate', elem_id="extras_generate", variant='primary')
with gr.Column(variant='panel'):
result_images = gr.Gallery(label="Result", show_label=False)
html_info_x = gr.HTML()
html_info = gr.HTML()
extras_send_to_img2img = gr.Button('Send to img2img')
extras_send_to_inpaint = gr.Button('Send to inpaint')
button_id = "hidden_element" if shared.cmd_opts.hide_ui_dir_config else ''
open_extras_folder = gr.Button('Open output directory', elem_id=button_id)
submit.click(
fn=wrap_gradio_gpu_call(modules.extras.run_extras),
_js="get_extras_tab_index",
inputs=[
dummy_component,
extras_image,
image_batch,
gfpgan_visibility,
codeformer_visibility,
codeformer_weight,
upscaling_resize,
extras_upscaler_1,
extras_upscaler_2,
extras_upscaler_2_visibility,
],
outputs=[
result_images,
html_info_x,
html_info,
]
)
extras_send_to_img2img.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_img2img",
inputs=[result_images],
outputs=[init_img],
)
extras_send_to_inpaint.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_img2img",
inputs=[result_images],
outputs=[init_img_with_mask],
)
with gr.Blocks(analytics_enabled=False) as pnginfo_interface:
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
image = gr.Image(elem_id="pnginfo_image", label="Source", source="upload", interactive=True, type="pil")
with gr.Column(variant='panel'):
html = gr.HTML()
generation_info = gr.Textbox(visible=False)
html2 = gr.HTML()
with gr.Row():
pnginfo_send_to_txt2img = gr.Button('Send to txt2img')
pnginfo_send_to_img2img = gr.Button('Send to img2img')
image.change(
fn=wrap_gradio_call(modules.extras.run_pnginfo),
inputs=[image],
outputs=[html, generation_info, html2],
)
with gr.Blocks() as modelmerger_interface:
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
gr.HTML(value="<p>A merger of the two checkpoints will be generated in your <b>checkpoint</b> directory.</p>")
with gr.Row():
primary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_primary_model_name", label="Primary Model Name")
secondary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_secondary_model_name", label="Secondary Model Name")
custom_name = gr.Textbox(label="Custom Name (Optional)")
interp_amount = gr.Slider(minimum=0.0, maximum=1.0, step=0.05, label='Interpolation Amount', value=0.3)
interp_method = gr.Radio(choices=["Weighted Sum", "Sigmoid", "Inverse Sigmoid"], value="Weighted Sum", label="Interpolation Method")
save_as_half = gr.Checkbox(value=False, label="Safe as float16")
modelmerger_merge = gr.Button(elem_id="modelmerger_merge", label="Merge", variant='primary')
with gr.Column(variant='panel'):
submit_result = gr.Textbox(elem_id="modelmerger_result", show_label=False)
sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings()
with gr.Blocks() as textual_inversion_interface:
with gr.Row().style(equal_height=False):
with gr.Column():
with gr.Group():
gr.HTML(value="<p style='margin-bottom: 0.7em'>See <b><a href=\"https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Textual-Inversion\">wiki</a></b> for detailed explanation.</p>")
gr.HTML(value="<p style='margin-bottom: 0.7em'>Create a new embedding</p>")
new_embedding_name = gr.Textbox(label="Name")
initialization_text = gr.Textbox(label="Initialization text", value="*")
nvpt = gr.Slider(label="Number of vectors per token", minimum=1, maximum=75, step=1, value=1)
with gr.Row():
with gr.Column(scale=3):
gr.HTML(value="")
with gr.Column():
create_embedding = gr.Button(value="Create", variant='primary')
with gr.Group():
gr.HTML(value="<p style='margin-bottom: 0.7em'>Preprocess images</p>")
process_src = gr.Textbox(label='Source directory')
process_dst = gr.Textbox(label='Destination directory')
with gr.Row():
process_flip = gr.Checkbox(label='Flip')
process_split = gr.Checkbox(label='Split into two')
process_caption = gr.Checkbox(label='Add caption')
with gr.Row():
with gr.Column(scale=3):
gr.HTML(value="")
with gr.Column():
run_preprocess = gr.Button(value="Preprocess", variant='primary')
with gr.Group():
gr.HTML(value="<p style='margin-bottom: 0.7em'>Train an embedding; must specify a directory with a set of 512x512 images</p>")
train_embedding_name = gr.Dropdown(label='Embedding', choices=sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys()))
learn_rate = gr.Number(label='Learning rate', value=5.0e-03)
dataset_directory = gr.Textbox(label='Dataset directory', placeholder="Path to directory with input images")
log_directory = gr.Textbox(label='Log directory', placeholder="Path to directory where to write outputs", value="textual_inversion")
template_file = gr.Textbox(label='Prompt template file', value=os.path.join(script_path, "textual_inversion_templates", "style_filewords.txt"))
steps = gr.Number(label='Max steps', value=100000, precision=0)
create_image_every = gr.Number(label='Save an image to log directory every N steps, 0 to disable', value=500, precision=0)
save_embedding_every = gr.Number(label='Save a copy of embedding to log directory every N steps, 0 to disable', value=500, precision=0)
with gr.Row():
with gr.Column(scale=2):
gr.HTML(value="")
with gr.Column():
with gr.Row():
interrupt_training = gr.Button(value="Interrupt")
train_embedding = gr.Button(value="Train", variant='primary')
with gr.Column():
progressbar = gr.HTML(elem_id="ti_progressbar")
ti_output = gr.Text(elem_id="ti_output", value="", show_label=False)
ti_gallery = gr.Gallery(label='Output', show_label=False, elem_id='ti_gallery').style(grid=4)
ti_preview = gr.Image(elem_id='ti_preview', visible=False)
ti_progress = gr.HTML(elem_id="ti_progress", value="")
ti_outcome = gr.HTML(elem_id="ti_error", value="")
setup_progressbar(progressbar, ti_preview, 'ti', textinfo=ti_progress)
create_embedding.click(
fn=modules.textual_inversion.ui.create_embedding,
inputs=[
new_embedding_name,
initialization_text,
nvpt,
],
outputs=[
train_embedding_name,
ti_output,
ti_outcome,
]
)
run_preprocess.click(
fn=wrap_gradio_gpu_call(modules.textual_inversion.ui.preprocess, extra_outputs=[gr.update()]),
_js="start_training_textual_inversion",
inputs=[
process_src,
process_dst,
process_flip,
process_split,
process_caption,
],
outputs=[
ti_output,
ti_outcome,
],
)
train_embedding.click(
fn=wrap_gradio_gpu_call(modules.textual_inversion.ui.train_embedding, extra_outputs=[gr.update()]),
_js="start_training_textual_inversion",
inputs=[
train_embedding_name,
learn_rate,
dataset_directory,
log_directory,
steps,
create_image_every,
save_embedding_every,
template_file,
],
outputs=[
ti_output,
ti_outcome,
]
)
interrupt_training.click(
fn=lambda: shared.state.interrupt(),
inputs=[],
outputs=[],
)
def create_setting_component(key):
def fun():
return opts.data[key] if key in opts.data else opts.data_labels[key].default
info = opts.data_labels[key]
t = type(info.default)
args = info.component_args() if callable(info.component_args) else info.component_args
if info.component is not None:
comp = info.component
elif t == str:
comp = gr.Textbox
elif t == int:
comp = gr.Number
elif t == bool:
comp = gr.Checkbox
else:
raise Exception(f'bad options item type: {str(t)} for key {key}')
return comp(label=info.label, value=fun, **(args or {}))
components = []
component_dict = {}
def open_folder(f):
if not shared.cmd_opts.hide_ui_dir_config:
path = os.path.normpath(f)
if platform.system() == "Windows":
os.startfile(path)
elif platform.system() == "Darwin":
sp.Popen(["open", path])
else:
sp.Popen(["xdg-open", path])
def run_settings(*args):
changed = 0
for key, value, comp in zip(opts.data_labels.keys(), args, components):
if not opts.same_type(value, opts.data_labels[key].default):
return f"Bad value for setting {key}: {value}; expecting {type(opts.data_labels[key].default).__name__}"
for key, value, comp in zip(opts.data_labels.keys(), args, components):
comp_args = opts.data_labels[key].component_args
if comp_args and isinstance(comp_args, dict) and comp_args.get('visible') is False:
continue
oldval = opts.data.get(key, None)
opts.data[key] = value
if oldval != value:
if opts.data_labels[key].onchange is not None:
opts.data_labels[key].onchange()
changed += 1
opts.save(shared.config_filename)
return f'{changed} settings changed.', opts.dumpjson()
with gr.Blocks(analytics_enabled=False) as settings_interface:
settings_submit = gr.Button(value="Apply settings", variant='primary')
result = gr.HTML()
settings_cols = 3
items_per_col = int(len(opts.data_labels) * 0.9 / settings_cols)
cols_displayed = 0
items_displayed = 0
previous_section = None
column = None
with gr.Row(elem_id="settings").style(equal_height=False):
for i, (k, item) in enumerate(opts.data_labels.items()):
if previous_section != item.section:
if cols_displayed < settings_cols and (items_displayed >= items_per_col or previous_section is None):
if column is not None:
column.__exit__()
column = gr.Column(variant='panel')
column.__enter__()
items_displayed = 0
cols_displayed += 1
previous_section = item.section
gr.HTML(elem_id="settings_header_text_{}".format(item.section[0]), value='<h1 class="gr-button-lg">{}</h1>'.format(item.section[1]))
component = create_setting_component(k)
component_dict[k] = component
components.append(component)
items_displayed += 1
request_notifications = gr.Button(value='Request browser notifications', elem_id="request_notifications")
request_notifications.click(
fn=lambda: None,
inputs=[],
outputs=[],
_js='function(){}'
)
with gr.Row():
reload_script_bodies = gr.Button(value='Reload custom script bodies (No ui updates, No restart)', variant='secondary')
restart_gradio = gr.Button(value='Restart Gradio and Refresh components (Custom Scripts, ui.py, js and css only)', variant='primary')
def reload_scripts():
modules.scripts.reload_script_body_only()
reload_script_bodies.click(
fn=reload_scripts,
inputs=[],
outputs=[],
_js='function(){}'
)
def request_restart():
settings_interface.gradio_ref.do_restart = True
restart_gradio.click(
fn=request_restart,
inputs=[],
outputs=[],
_js='function(){restart_reload()}'
)
if column is not None:
column.__exit__()
interfaces = [
(txt2img_interface, "txt2img", "txt2img"),
(img2img_interface, "img2img", "img2img"),
(extras_interface, "Extras", "extras"),
(pnginfo_interface, "PNG Info", "pnginfo"),
(modelmerger_interface, "Checkpoint Merger", "modelmerger"),
(textual_inversion_interface, "Textual inversion", "ti"),
(settings_interface, "Settings", "settings"),
]
with open(os.path.join(script_path, "style.css"), "r", encoding="utf8") as file:
css = file.read()
if os.path.exists(os.path.join(script_path, "user.css")):
with open(os.path.join(script_path, "user.css"), "r", encoding="utf8") as file:
usercss = file.read()
css += usercss
if not cmd_opts.no_progressbar_hiding:
css += css_hide_progressbar
with gr.Blocks(css=css, analytics_enabled=False, title="Stable Diffusion") as demo:
settings_interface.gradio_ref = demo
with gr.Tabs() as tabs:
for interface, label, ifid in interfaces:
with gr.TabItem(label, id=ifid):
interface.render()
if os.path.exists(os.path.join(script_path, "notification.mp3")):
audio_notification = gr.Audio(interactive=False, value=os.path.join(script_path, "notification.mp3"), elem_id="audio_notification", visible=False)
text_settings = gr.Textbox(elem_id="settings_json", value=lambda: opts.dumpjson(), visible=False)
settings_submit.click(
fn=run_settings,
inputs=components,
outputs=[result, text_settings],
)
def modelmerger(*args):
try:
results = modules.extras.run_modelmerger(*args)
except Exception as e:
print("Error loading/saving model file:", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
modules.sd_models.list_models() # to remove the potentially missing models from the list
return ["Error loading/saving model file. It doesn't exist or the name contains illegal characters"] + [gr.Dropdown.update(choices=modules.sd_models.checkpoint_tiles()) for _ in range(3)]
return results
modelmerger_merge.click(
fn=modelmerger,
inputs=[
primary_model_name,
secondary_model_name,
interp_method,
interp_amount,
save_as_half,
custom_name,
],
outputs=[
submit_result,
primary_model_name,
secondary_model_name,
component_dict['sd_model_checkpoint'],
]
)
paste_field_names = ['Prompt', 'Negative prompt', 'Steps', 'Face restoration', 'Seed', 'Size-1', 'Size-2']
txt2img_fields = [field for field,name in txt2img_paste_fields if name in paste_field_names]
img2img_fields = [field for field,name in img2img_paste_fields if name in paste_field_names]
send_to_img2img.click(
fn=lambda img, *args: (image_from_url_text(img),*args),
_js="(gallery, ...args) => [extract_image_from_gallery_img2img(gallery), ...args]",
inputs=[txt2img_gallery] + txt2img_fields,
outputs=[init_img] + img2img_fields,
)
send_to_inpaint.click(
fn=lambda x, *args: (image_from_url_text(x), *args),
_js="(gallery, ...args) => [extract_image_from_gallery_inpaint(gallery), ...args]",
inputs=[txt2img_gallery] + txt2img_fields,
outputs=[init_img_with_mask] + img2img_fields,
)
img2img_send_to_img2img.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_img2img",
inputs=[img2img_gallery],
outputs=[init_img],
)
img2img_send_to_inpaint.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_inpaint",
inputs=[img2img_gallery],
outputs=[init_img_with_mask],
)
send_to_extras.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_extras",
inputs=[txt2img_gallery],
outputs=[extras_image],
)
open_txt2img_folder.click(
fn=lambda: open_folder(opts.outdir_samples or opts.outdir_txt2img_samples),
inputs=[],
outputs=[],
)
open_img2img_folder.click(
fn=lambda: open_folder(opts.outdir_samples or opts.outdir_img2img_samples),
inputs=[],
outputs=[],
)
open_extras_folder.click(
fn=lambda: open_folder(opts.outdir_samples or opts.outdir_extras_samples),
inputs=[],
outputs=[],
)
img2img_send_to_extras.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_extras",
inputs=[img2img_gallery],
outputs=[extras_image],
)
modules.generation_parameters_copypaste.connect_paste(pnginfo_send_to_txt2img, txt2img_paste_fields, generation_info, 'switch_to_txt2img')
modules.generation_parameters_copypaste.connect_paste(pnginfo_send_to_img2img, img2img_paste_fields, generation_info, 'switch_to_img2img_img2img')
ui_config_file = cmd_opts.ui_config_file
ui_settings = {}
settings_count = len(ui_settings)
error_loading = False
try:
if os.path.exists(ui_config_file):
with open(ui_config_file, "r", encoding="utf8") as file:
ui_settings = json.load(file)
except Exception:
error_loading = True
print("Error loading settings:", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
def loadsave(path, x):
def apply_field(obj, field, condition=None):
key = path + "/" + field
if getattr(obj,'custom_script_source',None) is not None:
key = 'customscript/' + obj.custom_script_source + '/' + key
if getattr(obj, 'do_not_save_to_config', False):
return
saved_value = ui_settings.get(key, None)
if saved_value is None:
ui_settings[key] = getattr(obj, field)
elif condition is None or condition(saved_value):
setattr(obj, field, saved_value)
if type(x) in [gr.Slider, gr.Radio, gr.Checkbox, gr.Textbox, gr.Number] and x.visible:
apply_field(x, 'visible')
if type(x) == gr.Slider:
apply_field(x, 'value')
apply_field(x, 'minimum')
apply_field(x, 'maximum')
apply_field(x, 'step')
if type(x) == gr.Radio:
apply_field(x, 'value', lambda val: val in x.choices)
if type(x) == gr.Checkbox:
apply_field(x, 'value')
if type(x) == gr.Textbox:
apply_field(x, 'value')
if type(x) == gr.Number:
apply_field(x, 'value')
visit(txt2img_interface, loadsave, "txt2img")
visit(img2img_interface, loadsave, "img2img")
visit(extras_interface, loadsave, "extras")
if not error_loading and (not os.path.exists(ui_config_file) or settings_count != len(ui_settings)):
with open(ui_config_file, "w", encoding="utf8") as file:
json.dump(ui_settings, file, indent=4)
return demo
with open(os.path.join(script_path, "script.js"), "r", encoding="utf8") as jsfile:
javascript = f'<script>{jsfile.read()}</script>'
jsdir = os.path.join(script_path, "javascript")
for filename in sorted(os.listdir(jsdir)):
with open(os.path.join(jsdir, filename), "r", encoding="utf8") as jsfile:
javascript += f"\n<script>{jsfile.read()}</script>"
if 'gradio_routes_templates_response' not in globals():
def template_response(*args, **kwargs):
res = gradio_routes_templates_response(*args, **kwargs)
res.body = res.body.replace(b'</head>', f'{javascript}</head>'.encode("utf8"))
res.init_headers()
return res
gradio_routes_templates_response = gradio.routes.templates.TemplateResponse
gradio.routes.templates.TemplateResponse = template_response
<|code_end|>
|
modules/ui.py
<|code_start|>import base64
import html
import io
import json
import math
import mimetypes
import os
import random
import sys
import time
import traceback
import platform
import subprocess as sp
from functools import reduce
import numpy as np
import torch
from PIL import Image, PngImagePlugin
import piexif
import gradio as gr
import gradio.utils
import gradio.routes
from modules import sd_hijack
from modules.paths import script_path
from modules.shared import opts, cmd_opts
import modules.shared as shared
from modules.sd_samplers import samplers, samplers_for_img2img
from modules.sd_hijack import model_hijack
import modules.ldsr_model
import modules.scripts
import modules.gfpgan_model
import modules.codeformer_model
import modules.styles
import modules.generation_parameters_copypaste
from modules.prompt_parser import get_learned_conditioning_prompt_schedules
from modules.images import apply_filename_pattern, get_next_sequence_number
import modules.textual_inversion.ui
# this is a fix for Windows users. Without it, JavaScript files will be served with text/html content-type and the browser will not show any UI
mimetypes.init()
mimetypes.add_type('application/javascript', '.js')
if not cmd_opts.share and not cmd_opts.listen:
# fix gradio phoning home
gradio.utils.version_check = lambda: None
gradio.utils.get_local_ip_address = lambda: '127.0.0.1'
def gr_show(visible=True):
return {"visible": visible, "__type__": "update"}
sample_img2img = "assets/stable-samples/img2img/sketch-mountains-input.jpg"
sample_img2img = sample_img2img if os.path.exists(sample_img2img) else None
css_hide_progressbar = """
.wrap .m-12 svg { display:none!important; }
.wrap .m-12::before { content:"Loading..." }
.progress-bar { display:none!important; }
.meta-text { display:none!important; }
"""
# Using constants for these since the variation selector isn't visible.
# Important that they exactly match script.js for tooltip to work.
random_symbol = '\U0001f3b2\ufe0f' # 🎲️
reuse_symbol = '\u267b\ufe0f' # ♻️
art_symbol = '\U0001f3a8' # 🎨
paste_symbol = '\u2199\ufe0f' # ↙
folder_symbol = '\U0001f4c2' # 📂
def plaintext_to_html(text):
text = "<p>" + "<br>\n".join([f"{html.escape(x)}" for x in text.split('\n')]) + "</p>"
return text
def image_from_url_text(filedata):
if type(filedata) == list:
if len(filedata) == 0:
return None
filedata = filedata[0]
if filedata.startswith("data:image/png;base64,"):
filedata = filedata[len("data:image/png;base64,"):]
filedata = base64.decodebytes(filedata.encode('utf-8'))
image = Image.open(io.BytesIO(filedata))
return image
def send_gradio_gallery_to_image(x):
if len(x) == 0:
return None
return image_from_url_text(x[0])
def save_files(js_data, images, index):
import csv
filenames = []
    # quick dictionary-to-class-object conversion; it's necessary because apply_filename_pattern requires it
class MyObject:
def __init__(self, d=None):
if d is not None:
for key, value in d.items():
setattr(self, key, value)
data = json.loads(js_data)
p = MyObject(data)
path = opts.outdir_save
save_to_dirs = opts.use_save_to_dirs_for_ui
if save_to_dirs:
dirname = apply_filename_pattern(opts.directories_filename_pattern or "[prompt_words]", p, p.seed, p.prompt)
path = os.path.join(opts.outdir_save, dirname)
os.makedirs(path, exist_ok=True)
if index > -1 and opts.save_selected_only and (index >= data["index_of_first_image"]): # ensures we are looking at a specific non-grid picture, and we have save_selected_only
images = [images[index]]
infotexts = [data["infotexts"][index]]
else:
infotexts = data["infotexts"]
with open(os.path.join(opts.outdir_save, "log.csv"), "a", encoding="utf8", newline='') as file:
at_start = file.tell() == 0
writer = csv.writer(file)
if at_start:
writer.writerow(["prompt", "seed", "width", "height", "sampler", "cfgs", "steps", "filename", "negative_prompt"])
file_decoration = opts.samples_filename_pattern or "[seed]-[prompt_spaces]"
if file_decoration != "":
file_decoration = "-" + file_decoration.lower()
file_decoration = apply_filename_pattern(file_decoration, p, p.seed, p.prompt)
truncated = (file_decoration[:240] + '..') if len(file_decoration) > 240 else file_decoration
filename_base = truncated
extension = opts.samples_format.lower()
basecount = get_next_sequence_number(path, "")
for i, filedata in enumerate(images):
file_number = f"{basecount+i:05}"
filename = file_number + filename_base + f".{extension}"
filepath = os.path.join(path, filename)
if filedata.startswith("data:image/png;base64,"):
filedata = filedata[len("data:image/png;base64,"):]
image = Image.open(io.BytesIO(base64.decodebytes(filedata.encode('utf-8'))))
if opts.enable_pnginfo and extension == 'png':
pnginfo = PngImagePlugin.PngInfo()
pnginfo.add_text('parameters', infotexts[i])
image.save(filepath, pnginfo=pnginfo)
else:
image.save(filepath, quality=opts.jpeg_quality)
if opts.enable_pnginfo and extension in ("jpg", "jpeg", "webp"):
piexif.insert(piexif.dump({"Exif": {
piexif.ExifIFD.UserComment: piexif.helper.UserComment.dump(infotexts[i], encoding="unicode")
}}), filepath)
filenames.append(filename)
writer.writerow([data["prompt"], data["seed"], data["width"], data["height"], data["sampler"], data["cfg_scale"], data["steps"], filenames[0], data["negative_prompt"]])
return '', '', plaintext_to_html(f"Saved: {filenames[0]}")
def wrap_gradio_call(func, extra_outputs=None):
def f(*args, extra_outputs_array=extra_outputs, **kwargs):
run_memmon = opts.memmon_poll_rate > 0 and not shared.mem_mon.disabled
if run_memmon:
shared.mem_mon.monitor()
t = time.perf_counter()
try:
res = list(func(*args, **kwargs))
except Exception as e:
print("Error completing request", file=sys.stderr)
print("Arguments:", args, kwargs, file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
shared.state.job = ""
shared.state.job_count = 0
if extra_outputs_array is None:
extra_outputs_array = [None, '']
res = extra_outputs_array + [f"<div class='error'>{plaintext_to_html(type(e).__name__+': '+str(e))}</div>"]
elapsed = time.perf_counter() - t
elapsed_m = int(elapsed // 60)
elapsed_s = elapsed % 60
elapsed_text = f"{elapsed_s:.2f}s"
if (elapsed_m > 0):
elapsed_text = f"{elapsed_m}m "+elapsed_text
if run_memmon:
mem_stats = {k: -(v//-(1024*1024)) for k, v in shared.mem_mon.stop().items()}
active_peak = mem_stats['active_peak']
reserved_peak = mem_stats['reserved_peak']
sys_peak = mem_stats['system_peak']
sys_total = mem_stats['total']
sys_pct = round(sys_peak/max(sys_total, 1) * 100, 2)
vram_html = f"<p class='vram'>Torch active/reserved: {active_peak}/{reserved_peak} MiB, <wbr>Sys VRAM: {sys_peak}/{sys_total} MiB ({sys_pct}%)</p>"
else:
vram_html = ''
# last item is always HTML
res[-1] += f"<div class='performance'><p class='time'>Time taken: <wbr>{elapsed_text}</p>{vram_html}</div>"
shared.state.interrupted = False
shared.state.job_count = 0
return tuple(res)
return f
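# Builds the progress-bar HTML from the current job fraction and sampling step and, if enabled,
# decodes the current latent into a live preview image every N sampling steps.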
def check_progress_call(id_part):
if shared.state.job_count == 0:
return "", gr_show(False), gr_show(False), gr_show(False)
progress = 0
if shared.state.job_count > 0:
progress += shared.state.job_no / shared.state.job_count
if shared.state.sampling_steps > 0:
progress += 1 / shared.state.job_count * shared.state.sampling_step / shared.state.sampling_steps
progress = min(progress, 1)
progressbar = ""
if opts.show_progressbar:
progressbar = f"""<div class='progressDiv'><div class='progress' style="width:{progress * 100}%">{str(int(progress*100))+"%" if progress > 0.01 else ""}</div></div>"""
image = gr_show(False)
preview_visibility = gr_show(False)
if opts.show_progress_every_n_steps > 0:
if shared.parallel_processing_allowed:
if shared.state.sampling_step - shared.state.current_image_sampling_step >= opts.show_progress_every_n_steps and shared.state.current_latent is not None:
shared.state.current_image = modules.sd_samplers.sample_to_image(shared.state.current_latent)
shared.state.current_image_sampling_step = shared.state.sampling_step
image = shared.state.current_image
if image is None:
image = gr.update(value=None)
else:
preview_visibility = gr_show(True)
if shared.state.textinfo is not None:
textinfo_result = gr.HTML.update(value=shared.state.textinfo, visible=True)
else:
textinfo_result = gr_show(False)
return f"<span id='{id_part}_progress_span' style='display: none'>{time.time()}</span><p>{progressbar}</p>", preview_visibility, image, textinfo_result
def check_progress_call_initial(id_part):
shared.state.job_count = -1
shared.state.current_latent = None
shared.state.current_image = None
shared.state.textinfo = None
return check_progress_call(id_part)
def roll_artist(prompt):
allowed_cats = set([x for x in shared.artist_db.categories() if len(opts.random_artist_categories)==0 or x in opts.random_artist_categories])
artist = random.choice([x for x in shared.artist_db.artists if x.category in allowed_cats])
return prompt + ", " + artist.name if prompt != '' else artist.name
def visit(x, func, path=""):
if hasattr(x, 'children'):
for c in x.children:
visit(c, func, path)
elif x.label is not None:
func(path + "/" + str(x.label), x)
def add_style(name: str, prompt: str, negative_prompt: str):
if name is None:
return [gr_show(), gr_show()]
style = modules.styles.PromptStyle(name, prompt, negative_prompt)
shared.prompt_styles.styles[style.name] = style
# Save all loaded prompt styles: this allows us to update the storage format in the future more easily, because we
# reserialize all styles every time we save them
shared.prompt_styles.save_styles(shared.styles_filename)
return [gr.Dropdown.update(visible=True, choices=list(shared.prompt_styles.styles)) for _ in range(4)]
def apply_styles(prompt, prompt_neg, style1_name, style2_name):
prompt = shared.prompt_styles.apply_styles_to_prompt(prompt, [style1_name, style2_name])
prompt_neg = shared.prompt_styles.apply_negative_styles_to_prompt(prompt_neg, [style1_name, style2_name])
return [gr.Textbox.update(value=prompt), gr.Textbox.update(value=prompt_neg), gr.Dropdown.update(value="None"), gr.Dropdown.update(value="None")]
def interrogate(image):
prompt = shared.interrogator.interrogate(image)
return gr_show(True) if prompt is None else prompt
def create_seed_inputs():
with gr.Row():
with gr.Box():
with gr.Row(elem_id='seed_row'):
seed = (gr.Textbox if cmd_opts.use_textbox_seed else gr.Number)(label='Seed', value=-1)
seed.style(container=False)
random_seed = gr.Button(random_symbol, elem_id='random_seed')
reuse_seed = gr.Button(reuse_symbol, elem_id='reuse_seed')
with gr.Box(elem_id='subseed_show_box'):
seed_checkbox = gr.Checkbox(label='Extra', elem_id='subseed_show', value=False)
# Components to show/hide based on the 'Extra' checkbox
seed_extras = []
with gr.Row(visible=False) as seed_extra_row_1:
seed_extras.append(seed_extra_row_1)
with gr.Box():
with gr.Row(elem_id='subseed_row'):
subseed = gr.Number(label='Variation seed', value=-1)
subseed.style(container=False)
random_subseed = gr.Button(random_symbol, elem_id='random_subseed')
reuse_subseed = gr.Button(reuse_symbol, elem_id='reuse_subseed')
subseed_strength = gr.Slider(label='Variation strength', value=0.0, minimum=0, maximum=1, step=0.01)
with gr.Row(visible=False) as seed_extra_row_2:
seed_extras.append(seed_extra_row_2)
seed_resize_from_w = gr.Slider(minimum=0, maximum=2048, step=64, label="Resize seed from width", value=0)
seed_resize_from_h = gr.Slider(minimum=0, maximum=2048, step=64, label="Resize seed from height", value=0)
random_seed.click(fn=lambda: -1, show_progress=False, inputs=[], outputs=[seed])
random_subseed.click(fn=lambda: -1, show_progress=False, inputs=[], outputs=[subseed])
def change_visibility(show):
return {comp: gr_show(show) for comp in seed_extras}
seed_checkbox.change(change_visibility, show_progress=False, inputs=[seed_checkbox], outputs=seed_extras)
return seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox
def connect_reuse_seed(seed: gr.Number, reuse_seed: gr.Button, generation_info: gr.Textbox, dummy_component, is_subseed):
""" Connects a 'reuse (sub)seed' button's click event so that it copies last used
(sub)seed value from generation info the to the seed field. If copying subseed and subseed strength
was 0, i.e. no variation seed was used, it copies the normal seed value instead."""
def copy_seed(gen_info_string: str, index):
res = -1
try:
gen_info = json.loads(gen_info_string)
index -= gen_info.get('index_of_first_image', 0)
if is_subseed and gen_info.get('subseed_strength', 0) > 0:
all_subseeds = gen_info.get('all_subseeds', [-1])
res = all_subseeds[index if 0 <= index < len(all_subseeds) else 0]
else:
all_seeds = gen_info.get('all_seeds', [-1])
res = all_seeds[index if 0 <= index < len(all_seeds) else 0]
except json.decoder.JSONDecodeError as e:
if gen_info_string != '':
print("Error parsing JSON generation info:", file=sys.stderr)
print(gen_info_string, file=sys.stderr)
return [res, gr_show(False)]
reuse_seed.click(
fn=copy_seed,
_js="(x, y) => [x, selected_gallery_index()]",
show_progress=False,
inputs=[generation_info, dummy_component],
outputs=[seed, dummy_component]
)
def update_token_counter(text, steps):
try:
prompt_schedules = get_learned_conditioning_prompt_schedules([text], steps)
except Exception:
# a parsing error can happen here during typing, and we don't want to bother the user with
# messages related to it in console
prompt_schedules = [[[steps, text]]]
flat_prompts = reduce(lambda list1, list2: list1+list2, prompt_schedules)
prompts = [prompt_text for step, prompt_text in flat_prompts]
tokens, token_count, max_length = max([model_hijack.tokenize(prompt) for prompt in prompts], key=lambda args: args[1])
style_class = ' class="red"' if (token_count > max_length) else ""
return f"<span {style_class}>{token_count}/{max_length}</span>"
def create_toprow(is_img2img):
id_part = "img2img" if is_img2img else "txt2img"
with gr.Row(elem_id="toprow"):
with gr.Column(scale=4):
with gr.Row():
with gr.Column(scale=80):
with gr.Row():
prompt = gr.Textbox(label="Prompt", elem_id=f"{id_part}_prompt", show_label=False, placeholder="Prompt", lines=2)
with gr.Column(scale=1, elem_id="roll_col"):
roll = gr.Button(value=art_symbol, elem_id="roll", visible=len(shared.artist_db.artists) > 0)
paste = gr.Button(value=paste_symbol, elem_id="paste")
token_counter = gr.HTML(value="<span></span>", elem_id=f"{id_part}_token_counter")
token_button = gr.Button(visible=False, elem_id=f"{id_part}_token_button")
with gr.Column(scale=10, elem_id="style_pos_col"):
prompt_style = gr.Dropdown(label="Style 1", elem_id=f"{id_part}_style_index", choices=[k for k, v in shared.prompt_styles.styles.items()], value=next(iter(shared.prompt_styles.styles.keys())), visible=len(shared.prompt_styles.styles) > 1)
with gr.Row():
with gr.Column(scale=8):
negative_prompt = gr.Textbox(label="Negative prompt", elem_id="negative_prompt", show_label=False, placeholder="Negative prompt", lines=2)
with gr.Column(scale=1, elem_id="style_neg_col"):
prompt_style2 = gr.Dropdown(label="Style 2", elem_id=f"{id_part}_style2_index", choices=[k for k, v in shared.prompt_styles.styles.items()], value=next(iter(shared.prompt_styles.styles.keys())), visible=len(shared.prompt_styles.styles) > 1)
with gr.Column(scale=1):
with gr.Row():
interrupt = gr.Button('Interrupt', elem_id=f"{id_part}_interrupt")
submit = gr.Button('Generate', elem_id=f"{id_part}_generate", variant='primary')
interrupt.click(
fn=lambda: shared.state.interrupt(),
inputs=[],
outputs=[],
)
with gr.Row():
if is_img2img:
interrogate = gr.Button('Interrogate', elem_id="interrogate")
else:
interrogate = None
prompt_style_apply = gr.Button('Apply style', elem_id="style_apply")
save_style = gr.Button('Create style', elem_id="style_create")
return prompt, roll, prompt_style, negative_prompt, prompt_style2, submit, interrogate, prompt_style_apply, save_style, paste, token_counter, token_button
def setup_progressbar(progressbar, preview, id_part, textinfo=None):
if textinfo is None:
textinfo = gr.HTML(visible=False)
check_progress = gr.Button('Check progress', elem_id=f"{id_part}_check_progress", visible=False)
check_progress.click(
fn=lambda: check_progress_call(id_part),
show_progress=False,
inputs=[],
outputs=[progressbar, preview, preview, textinfo],
)
check_progress_initial = gr.Button('Check progress (first)', elem_id=f"{id_part}_check_progress_initial", visible=False)
check_progress_initial.click(
fn=lambda: check_progress_call_initial(id_part),
show_progress=False,
inputs=[],
outputs=[progressbar, preview, preview, textinfo],
)
def create_ui(wrap_gradio_gpu_call):
import modules.img2img
import modules.txt2img
with gr.Blocks(analytics_enabled=False) as txt2img_interface:
txt2img_prompt, roll, txt2img_prompt_style, txt2img_negative_prompt, txt2img_prompt_style2, submit, _, txt2img_prompt_style_apply, txt2img_save_style, paste, token_counter, token_button = create_toprow(is_img2img=False)
dummy_component = gr.Label(visible=False)
with gr.Row(elem_id='txt2img_progress_row'):
with gr.Column(scale=1):
pass
with gr.Column(scale=1):
progressbar = gr.HTML(elem_id="txt2img_progressbar")
txt2img_preview = gr.Image(elem_id='txt2img_preview', visible=False)
setup_progressbar(progressbar, txt2img_preview, 'txt2img')
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
steps = gr.Slider(minimum=1, maximum=150, step=1, label="Sampling Steps", value=20)
sampler_index = gr.Radio(label='Sampling method', elem_id="txt2img_sampling", choices=[x.name for x in samplers], value=samplers[0].name, type="index")
with gr.Group():
width = gr.Slider(minimum=64, maximum=2048, step=64, label="Width", value=512)
height = gr.Slider(minimum=64, maximum=2048, step=64, label="Height", value=512)
with gr.Row():
restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1)
tiling = gr.Checkbox(label='Tiling', value=False)
enable_hr = gr.Checkbox(label='Highres. fix', value=False)
with gr.Row(visible=False) as hr_options:
scale_latent = gr.Checkbox(label='Scale latent', value=False)
denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7)
with gr.Row():
batch_count = gr.Slider(minimum=1, maximum=cmd_opts.max_batch_count, step=1, label='Batch count', value=1)
batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1)
cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0)
seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs()
with gr.Group():
custom_inputs = modules.scripts.scripts_txt2img.setup_ui(is_img2img=False)
with gr.Column(variant='panel'):
with gr.Group():
txt2img_preview = gr.Image(elem_id='txt2img_preview', visible=False)
txt2img_gallery = gr.Gallery(label='Output', show_label=False, elem_id='txt2img_gallery').style(grid=4)
with gr.Group():
with gr.Row():
save = gr.Button('Save')
send_to_img2img = gr.Button('Send to img2img')
send_to_inpaint = gr.Button('Send to inpaint')
send_to_extras = gr.Button('Send to extras')
button_id = "hidden_element" if shared.cmd_opts.hide_ui_dir_config else 'open_folder'
open_txt2img_folder = gr.Button(folder_symbol, elem_id=button_id)
with gr.Group():
html_info = gr.HTML()
generation_info = gr.Textbox(visible=False)
connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False)
connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True)
txt2img_args = dict(
fn=wrap_gradio_gpu_call(modules.txt2img.txt2img),
_js="submit",
inputs=[
txt2img_prompt,
txt2img_negative_prompt,
txt2img_prompt_style,
txt2img_prompt_style2,
steps,
sampler_index,
restore_faces,
tiling,
batch_count,
batch_size,
cfg_scale,
seed,
subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox,
height,
width,
enable_hr,
scale_latent,
denoising_strength,
] + custom_inputs,
outputs=[
txt2img_gallery,
generation_info,
html_info
],
show_progress=False,
)
txt2img_prompt.submit(**txt2img_args)
submit.click(**txt2img_args)
enable_hr.change(
fn=lambda x: gr_show(x),
inputs=[enable_hr],
outputs=[hr_options],
)
save.click(
fn=wrap_gradio_call(save_files),
_js="(x, y, z) => [x, y, selected_gallery_index()]",
inputs=[
generation_info,
txt2img_gallery,
html_info,
],
outputs=[
html_info,
html_info,
html_info,
]
)
roll.click(
fn=roll_artist,
_js="update_txt2img_tokens",
inputs=[
txt2img_prompt,
],
outputs=[
txt2img_prompt,
]
)
txt2img_paste_fields = [
(txt2img_prompt, "Prompt"),
(txt2img_negative_prompt, "Negative prompt"),
(steps, "Steps"),
(sampler_index, "Sampler"),
(restore_faces, "Face restoration"),
(cfg_scale, "CFG scale"),
(seed, "Seed"),
(width, "Size-1"),
(height, "Size-2"),
(batch_size, "Batch size"),
(subseed, "Variation seed"),
(subseed_strength, "Variation seed strength"),
(seed_resize_from_w, "Seed resize from-1"),
(seed_resize_from_h, "Seed resize from-2"),
(denoising_strength, "Denoising strength"),
(enable_hr, lambda d: "Denoising strength" in d),
(hr_options, lambda d: gr.Row.update(visible="Denoising strength" in d)),
]
modules.generation_parameters_copypaste.connect_paste(paste, txt2img_paste_fields, txt2img_prompt)
token_button.click(fn=update_token_counter, inputs=[txt2img_prompt, steps], outputs=[token_counter])
with gr.Blocks(analytics_enabled=False) as img2img_interface:
img2img_prompt, roll, img2img_prompt_style, img2img_negative_prompt, img2img_prompt_style2, submit, img2img_interrogate, img2img_prompt_style_apply, img2img_save_style, paste, token_counter, token_button = create_toprow(is_img2img=True)
with gr.Row(elem_id='img2img_progress_row'):
with gr.Column(scale=1):
pass
with gr.Column(scale=1):
progressbar = gr.HTML(elem_id="img2img_progressbar")
img2img_preview = gr.Image(elem_id='img2img_preview', visible=False)
setup_progressbar(progressbar, img2img_preview, 'img2img')
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
with gr.Tabs(elem_id="mode_img2img") as tabs_img2img_mode:
with gr.TabItem('img2img', id='img2img'):
init_img = gr.Image(label="Image for img2img", elem_id="img2img_image", show_label=False, source="upload", interactive=True, type="pil", tool=cmd_opts.gradio_img2img_tool)
with gr.TabItem('Inpaint', id='inpaint'):
init_img_with_mask = gr.Image(label="Image for inpainting with mask", show_label=False, elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool="sketch", image_mode="RGBA")
init_img_inpaint = gr.Image(label="Image for img2img", show_label=False, source="upload", interactive=True, type="pil", visible=False, elem_id="img_inpaint_base")
init_mask_inpaint = gr.Image(label="Mask", source="upload", interactive=True, type="pil", visible=False, elem_id="img_inpaint_mask")
mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4)
with gr.Row():
mask_mode = gr.Radio(label="Mask mode", show_label=False, choices=["Draw mask", "Upload mask"], type="index", value="Draw mask", elem_id="mask_mode")
inpainting_mask_invert = gr.Radio(label='Masking mode', show_label=False, choices=['Inpaint masked', 'Inpaint not masked'], value='Inpaint masked', type="index")
inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='original', type="index")
with gr.Row():
inpaint_full_res = gr.Checkbox(label='Inpaint at full resolution', value=False)
inpaint_full_res_padding = gr.Slider(label='Inpaint at full resolution padding, pixels', minimum=0, maximum=256, step=4, value=32)
with gr.TabItem('Batch img2img', id='batch'):
hidden = '<br>Disabled when launched with --hide-ui-dir-config.' if shared.cmd_opts.hide_ui_dir_config else ''
gr.HTML(f"<p class=\"text-gray-500\">Process images in a directory on the same machine where the server is running.<br>Use an empty output directory to save pictures normally instead of writing to the output directory.{hidden}</p>")
img2img_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs)
img2img_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs)
with gr.Row():
resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", show_label=False, choices=["Just resize", "Crop and resize", "Resize and fill"], type="index", value="Just resize")
steps = gr.Slider(minimum=1, maximum=150, step=1, label="Sampling Steps", value=20)
sampler_index = gr.Radio(label='Sampling method', choices=[x.name for x in samplers_for_img2img], value=samplers_for_img2img[0].name, type="index")
with gr.Group():
width = gr.Slider(minimum=64, maximum=2048, step=64, label="Width", value=512)
height = gr.Slider(minimum=64, maximum=2048, step=64, label="Height", value=512)
with gr.Row():
restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1)
tiling = gr.Checkbox(label='Tiling', value=False)
with gr.Row():
batch_count = gr.Slider(minimum=1, maximum=cmd_opts.max_batch_count, step=1, label='Batch count', value=1)
batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1)
with gr.Group():
cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0)
denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75)
seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs()
with gr.Group():
custom_inputs = modules.scripts.scripts_img2img.setup_ui(is_img2img=True)
with gr.Column(variant='panel'):
with gr.Group():
img2img_preview = gr.Image(elem_id='img2img_preview', visible=False)
img2img_gallery = gr.Gallery(label='Output', show_label=False, elem_id='img2img_gallery').style(grid=4)
with gr.Group():
with gr.Row():
save = gr.Button('Save')
img2img_send_to_img2img = gr.Button('Send to img2img')
img2img_send_to_inpaint = gr.Button('Send to inpaint')
img2img_send_to_extras = gr.Button('Send to extras')
button_id = "hidden_element" if shared.cmd_opts.hide_ui_dir_config else 'open_folder'
open_img2img_folder = gr.Button(folder_symbol, elem_id=button_id)
with gr.Group():
html_info = gr.HTML()
generation_info = gr.Textbox(visible=False)
connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False)
connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True)
mask_mode.change(
lambda mode, img: {
init_img_with_mask: gr_show(mode == 0),
init_img_inpaint: gr_show(mode == 1),
init_mask_inpaint: gr_show(mode == 1),
},
inputs=[mask_mode, init_img_with_mask],
outputs=[
init_img_with_mask,
init_img_inpaint,
init_mask_inpaint,
],
)
img2img_args = dict(
fn=wrap_gradio_gpu_call(modules.img2img.img2img),
_js="submit_img2img",
inputs=[
dummy_component,
img2img_prompt,
img2img_negative_prompt,
img2img_prompt_style,
img2img_prompt_style2,
init_img,
init_img_with_mask,
init_img_inpaint,
init_mask_inpaint,
mask_mode,
steps,
sampler_index,
mask_blur,
inpainting_fill,
restore_faces,
tiling,
batch_count,
batch_size,
cfg_scale,
denoising_strength,
seed,
subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox,
height,
width,
resize_mode,
inpaint_full_res,
inpaint_full_res_padding,
inpainting_mask_invert,
img2img_batch_input_dir,
img2img_batch_output_dir,
] + custom_inputs,
outputs=[
img2img_gallery,
generation_info,
html_info
],
show_progress=False,
)
img2img_prompt.submit(**img2img_args)
submit.click(**img2img_args)
img2img_interrogate.click(
fn=interrogate,
inputs=[init_img],
outputs=[img2img_prompt],
)
save.click(
fn=wrap_gradio_call(save_files),
_js="(x, y, z) => [x, y, selected_gallery_index()]",
inputs=[
generation_info,
img2img_gallery,
html_info
],
outputs=[
html_info,
html_info,
html_info,
]
)
roll.click(
fn=roll_artist,
_js="update_img2img_tokens",
inputs=[
img2img_prompt,
],
outputs=[
img2img_prompt,
]
)
prompts = [(txt2img_prompt, txt2img_negative_prompt), (img2img_prompt, img2img_negative_prompt)]
style_dropdowns = [(txt2img_prompt_style, txt2img_prompt_style2), (img2img_prompt_style, img2img_prompt_style2)]
style_js_funcs = ["update_txt2img_tokens", "update_img2img_tokens"]
for button, (prompt, negative_prompt) in zip([txt2img_save_style, img2img_save_style], prompts):
button.click(
fn=add_style,
_js="ask_for_style_name",
# Have to pass empty dummy component here, because the JavaScript and Python function have to accept
# the same number of parameters, but we only know the style-name after the JavaScript prompt
inputs=[dummy_component, prompt, negative_prompt],
outputs=[txt2img_prompt_style, img2img_prompt_style, txt2img_prompt_style2, img2img_prompt_style2],
)
for button, (prompt, negative_prompt), (style1, style2), js_func in zip([txt2img_prompt_style_apply, img2img_prompt_style_apply], prompts, style_dropdowns, style_js_funcs):
button.click(
fn=apply_styles,
_js=js_func,
inputs=[prompt, negative_prompt, style1, style2],
outputs=[prompt, negative_prompt, style1, style2],
)
img2img_paste_fields = [
(img2img_prompt, "Prompt"),
(img2img_negative_prompt, "Negative prompt"),
(steps, "Steps"),
(sampler_index, "Sampler"),
(restore_faces, "Face restoration"),
(cfg_scale, "CFG scale"),
(seed, "Seed"),
(width, "Size-1"),
(height, "Size-2"),
(batch_size, "Batch size"),
(subseed, "Variation seed"),
(subseed_strength, "Variation seed strength"),
(seed_resize_from_w, "Seed resize from-1"),
(seed_resize_from_h, "Seed resize from-2"),
(denoising_strength, "Denoising strength"),
]
modules.generation_parameters_copypaste.connect_paste(paste, img2img_paste_fields, img2img_prompt)
token_button.click(fn=update_token_counter, inputs=[img2img_prompt, steps], outputs=[token_counter])
with gr.Blocks(analytics_enabled=False) as extras_interface:
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
with gr.Tabs(elem_id="mode_extras"):
with gr.TabItem('Single Image'):
extras_image = gr.Image(label="Source", source="upload", interactive=True, type="pil")
with gr.TabItem('Batch Process'):
image_batch = gr.File(label="Batch Process", file_count="multiple", interactive=True, type="file")
upscaling_resize = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Resize", value=2)
with gr.Group():
extras_upscaler_1 = gr.Radio(label='Upscaler 1', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index")
with gr.Group():
extras_upscaler_2 = gr.Radio(label='Upscaler 2', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index")
extras_upscaler_2_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="Upscaler 2 visibility", value=1)
with gr.Group():
gfpgan_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="GFPGAN visibility", value=0, interactive=modules.gfpgan_model.have_gfpgan)
with gr.Group():
codeformer_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="CodeFormer visibility", value=0, interactive=modules.codeformer_model.have_codeformer)
codeformer_weight = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="CodeFormer weight (0 = maximum effect, 1 = minimum effect)", value=0, interactive=modules.codeformer_model.have_codeformer)
submit = gr.Button('Generate', elem_id="extras_generate", variant='primary')
with gr.Column(variant='panel'):
result_images = gr.Gallery(label="Result", show_label=False)
html_info_x = gr.HTML()
html_info = gr.HTML()
extras_send_to_img2img = gr.Button('Send to img2img')
extras_send_to_inpaint = gr.Button('Send to inpaint')
button_id = "hidden_element" if shared.cmd_opts.hide_ui_dir_config else ''
open_extras_folder = gr.Button('Open output directory', elem_id=button_id)
submit.click(
fn=wrap_gradio_gpu_call(modules.extras.run_extras),
_js="get_extras_tab_index",
inputs=[
dummy_component,
extras_image,
image_batch,
gfpgan_visibility,
codeformer_visibility,
codeformer_weight,
upscaling_resize,
extras_upscaler_1,
extras_upscaler_2,
extras_upscaler_2_visibility,
],
outputs=[
result_images,
html_info_x,
html_info,
]
)
extras_send_to_img2img.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_img2img",
inputs=[result_images],
outputs=[init_img],
)
extras_send_to_inpaint.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_img2img",
inputs=[result_images],
outputs=[init_img_with_mask],
)
with gr.Blocks(analytics_enabled=False) as pnginfo_interface:
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
image = gr.Image(elem_id="pnginfo_image", label="Source", source="upload", interactive=True, type="pil")
with gr.Column(variant='panel'):
html = gr.HTML()
generation_info = gr.Textbox(visible=False)
html2 = gr.HTML()
with gr.Row():
pnginfo_send_to_txt2img = gr.Button('Send to txt2img')
pnginfo_send_to_img2img = gr.Button('Send to img2img')
image.change(
fn=wrap_gradio_call(modules.extras.run_pnginfo),
inputs=[image],
outputs=[html, generation_info, html2],
)
with gr.Blocks() as modelmerger_interface:
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
gr.HTML(value="<p>A merger of the two checkpoints will be generated in your <b>checkpoint</b> directory.</p>")
with gr.Row():
primary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_primary_model_name", label="Primary Model Name")
secondary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_secondary_model_name", label="Secondary Model Name")
custom_name = gr.Textbox(label="Custom Name (Optional)")
interp_amount = gr.Slider(minimum=0.0, maximum=1.0, step=0.05, label='Interpolation Amount', value=0.3)
interp_method = gr.Radio(choices=["Weighted Sum", "Sigmoid", "Inverse Sigmoid"], value="Weighted Sum", label="Interpolation Method")
                save_as_half = gr.Checkbox(value=False, label="Save as float16")
modelmerger_merge = gr.Button(elem_id="modelmerger_merge", label="Merge", variant='primary')
with gr.Column(variant='panel'):
submit_result = gr.Textbox(elem_id="modelmerger_result", show_label=False)
sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings()
with gr.Blocks() as textual_inversion_interface:
with gr.Row().style(equal_height=False):
with gr.Column():
with gr.Group():
gr.HTML(value="<p style='margin-bottom: 0.7em'>See <b><a href=\"https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Textual-Inversion\">wiki</a></b> for detailed explanation.</p>")
gr.HTML(value="<p style='margin-bottom: 0.7em'>Create a new embedding</p>")
new_embedding_name = gr.Textbox(label="Name")
initialization_text = gr.Textbox(label="Initialization text", value="*")
nvpt = gr.Slider(label="Number of vectors per token", minimum=1, maximum=75, step=1, value=1)
with gr.Row():
with gr.Column(scale=3):
gr.HTML(value="")
with gr.Column():
create_embedding = gr.Button(value="Create", variant='primary')
with gr.Group():
gr.HTML(value="<p style='margin-bottom: 0.7em'>Preprocess images</p>")
process_src = gr.Textbox(label='Source directory')
process_dst = gr.Textbox(label='Destination directory')
with gr.Row():
process_flip = gr.Checkbox(label='Flip')
process_split = gr.Checkbox(label='Split into two')
process_caption = gr.Checkbox(label='Add caption')
with gr.Row():
with gr.Column(scale=3):
gr.HTML(value="")
with gr.Column():
run_preprocess = gr.Button(value="Preprocess", variant='primary')
with gr.Group():
gr.HTML(value="<p style='margin-bottom: 0.7em'>Train an embedding; must specify a directory with a set of 512x512 images</p>")
train_embedding_name = gr.Dropdown(label='Embedding', choices=sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys()))
learn_rate = gr.Number(label='Learning rate', value=5.0e-03)
dataset_directory = gr.Textbox(label='Dataset directory', placeholder="Path to directory with input images")
log_directory = gr.Textbox(label='Log directory', placeholder="Path to directory where to write outputs", value="textual_inversion")
template_file = gr.Textbox(label='Prompt template file', value=os.path.join(script_path, "textual_inversion_templates", "style_filewords.txt"))
steps = gr.Number(label='Max steps', value=100000, precision=0)
create_image_every = gr.Number(label='Save an image to log directory every N steps, 0 to disable', value=500, precision=0)
save_embedding_every = gr.Number(label='Save a copy of embedding to log directory every N steps, 0 to disable', value=500, precision=0)
with gr.Row():
with gr.Column(scale=2):
gr.HTML(value="")
with gr.Column():
with gr.Row():
interrupt_training = gr.Button(value="Interrupt")
train_embedding = gr.Button(value="Train", variant='primary')
with gr.Column():
progressbar = gr.HTML(elem_id="ti_progressbar")
ti_output = gr.Text(elem_id="ti_output", value="", show_label=False)
ti_gallery = gr.Gallery(label='Output', show_label=False, elem_id='ti_gallery').style(grid=4)
ti_preview = gr.Image(elem_id='ti_preview', visible=False)
ti_progress = gr.HTML(elem_id="ti_progress", value="")
ti_outcome = gr.HTML(elem_id="ti_error", value="")
setup_progressbar(progressbar, ti_preview, 'ti', textinfo=ti_progress)
create_embedding.click(
fn=modules.textual_inversion.ui.create_embedding,
inputs=[
new_embedding_name,
initialization_text,
nvpt,
],
outputs=[
train_embedding_name,
ti_output,
ti_outcome,
]
)
run_preprocess.click(
fn=wrap_gradio_gpu_call(modules.textual_inversion.ui.preprocess, extra_outputs=[gr.update()]),
_js="start_training_textual_inversion",
inputs=[
process_src,
process_dst,
process_flip,
process_split,
process_caption,
],
outputs=[
ti_output,
ti_outcome,
],
)
train_embedding.click(
fn=wrap_gradio_gpu_call(modules.textual_inversion.ui.train_embedding, extra_outputs=[gr.update()]),
_js="start_training_textual_inversion",
inputs=[
train_embedding_name,
learn_rate,
dataset_directory,
log_directory,
steps,
create_image_every,
save_embedding_every,
template_file,
],
outputs=[
ti_output,
ti_outcome,
]
)
interrupt_training.click(
fn=lambda: shared.state.interrupt(),
inputs=[],
outputs=[],
)
def create_setting_component(key):
def fun():
return opts.data[key] if key in opts.data else opts.data_labels[key].default
info = opts.data_labels[key]
t = type(info.default)
args = info.component_args() if callable(info.component_args) else info.component_args
if info.component is not None:
comp = info.component
elif t == str:
comp = gr.Textbox
elif t == int:
comp = gr.Number
elif t == bool:
comp = gr.Checkbox
else:
raise Exception(f'bad options item type: {str(t)} for key {key}')
return comp(label=info.label, value=fun, **(args or {}))
components = []
component_dict = {}
def open_folder(f):
if not shared.cmd_opts.hide_ui_dir_config:
path = os.path.normpath(f)
if platform.system() == "Windows":
os.startfile(path)
elif platform.system() == "Darwin":
sp.Popen(["open", path])
else:
sp.Popen(["xdg-open", path])
def run_settings(*args):
changed = 0
for key, value, comp in zip(opts.data_labels.keys(), args, components):
if not opts.same_type(value, opts.data_labels[key].default):
return f"Bad value for setting {key}: {value}; expecting {type(opts.data_labels[key].default).__name__}"
for key, value, comp in zip(opts.data_labels.keys(), args, components):
comp_args = opts.data_labels[key].component_args
if comp_args and isinstance(comp_args, dict) and comp_args.get('visible') is False:
continue
oldval = opts.data.get(key, None)
opts.data[key] = value
if oldval != value:
if opts.data_labels[key].onchange is not None:
opts.data_labels[key].onchange()
changed += 1
opts.save(shared.config_filename)
return f'{changed} settings changed.', opts.dumpjson()
with gr.Blocks(analytics_enabled=False) as settings_interface:
settings_submit = gr.Button(value="Apply settings", variant='primary')
result = gr.HTML()
settings_cols = 3
items_per_col = int(len(opts.data_labels) * 0.9 / settings_cols)
cols_displayed = 0
items_displayed = 0
previous_section = None
column = None
with gr.Row(elem_id="settings").style(equal_height=False):
for i, (k, item) in enumerate(opts.data_labels.items()):
if previous_section != item.section:
if cols_displayed < settings_cols and (items_displayed >= items_per_col or previous_section is None):
if column is not None:
column.__exit__()
column = gr.Column(variant='panel')
column.__enter__()
items_displayed = 0
cols_displayed += 1
previous_section = item.section
gr.HTML(elem_id="settings_header_text_{}".format(item.section[0]), value='<h1 class="gr-button-lg">{}</h1>'.format(item.section[1]))
component = create_setting_component(k)
component_dict[k] = component
components.append(component)
items_displayed += 1
request_notifications = gr.Button(value='Request browser notifications', elem_id="request_notifications")
request_notifications.click(
fn=lambda: None,
inputs=[],
outputs=[],
_js='function(){}'
)
with gr.Row():
reload_script_bodies = gr.Button(value='Reload custom script bodies (No ui updates, No restart)', variant='secondary')
restart_gradio = gr.Button(value='Restart Gradio and Refresh components (Custom Scripts, ui.py, js and css only)', variant='primary')
def reload_scripts():
modules.scripts.reload_script_body_only()
reload_script_bodies.click(
fn=reload_scripts,
inputs=[],
outputs=[],
_js='function(){}'
)
def request_restart():
settings_interface.gradio_ref.do_restart = True
restart_gradio.click(
fn=request_restart,
inputs=[],
outputs=[],
_js='function(){restart_reload()}'
)
if column is not None:
column.__exit__()
interfaces = [
(txt2img_interface, "txt2img", "txt2img"),
(img2img_interface, "img2img", "img2img"),
(extras_interface, "Extras", "extras"),
(pnginfo_interface, "PNG Info", "pnginfo"),
(modelmerger_interface, "Checkpoint Merger", "modelmerger"),
(textual_inversion_interface, "Textual inversion", "ti"),
(settings_interface, "Settings", "settings"),
]
with open(os.path.join(script_path, "style.css"), "r", encoding="utf8") as file:
css = file.read()
if os.path.exists(os.path.join(script_path, "user.css")):
with open(os.path.join(script_path, "user.css"), "r", encoding="utf8") as file:
usercss = file.read()
css += usercss
if not cmd_opts.no_progressbar_hiding:
css += css_hide_progressbar
with gr.Blocks(css=css, analytics_enabled=False, title="Stable Diffusion") as demo:
settings_interface.gradio_ref = demo
with gr.Tabs() as tabs:
for interface, label, ifid in interfaces:
with gr.TabItem(label, id=ifid):
interface.render()
if os.path.exists(os.path.join(script_path, "notification.mp3")):
audio_notification = gr.Audio(interactive=False, value=os.path.join(script_path, "notification.mp3"), elem_id="audio_notification", visible=False)
text_settings = gr.Textbox(elem_id="settings_json", value=lambda: opts.dumpjson(), visible=False)
settings_submit.click(
fn=run_settings,
inputs=components,
outputs=[result, text_settings],
)
def modelmerger(*args):
try:
results = modules.extras.run_modelmerger(*args)
except Exception as e:
print("Error loading/saving model file:", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
modules.sd_models.list_models() # to remove the potentially missing models from the list
return ["Error loading/saving model file. It doesn't exist or the name contains illegal characters"] + [gr.Dropdown.update(choices=modules.sd_models.checkpoint_tiles()) for _ in range(3)]
return results
modelmerger_merge.click(
fn=modelmerger,
inputs=[
primary_model_name,
secondary_model_name,
interp_method,
interp_amount,
save_as_half,
custom_name,
],
outputs=[
submit_result,
primary_model_name,
secondary_model_name,
component_dict['sd_model_checkpoint'],
]
)
paste_field_names = ['Prompt', 'Negative prompt', 'Steps', 'Face restoration', 'Seed', 'Size-1', 'Size-2']
txt2img_fields = [field for field,name in txt2img_paste_fields if name in paste_field_names]
img2img_fields = [field for field,name in img2img_paste_fields if name in paste_field_names]
send_to_img2img.click(
fn=lambda img, *args: (image_from_url_text(img),*args),
_js="(gallery, ...args) => [extract_image_from_gallery_img2img(gallery), ...args]",
inputs=[txt2img_gallery] + txt2img_fields,
outputs=[init_img] + img2img_fields,
)
send_to_inpaint.click(
fn=lambda x, *args: (image_from_url_text(x), *args),
_js="(gallery, ...args) => [extract_image_from_gallery_inpaint(gallery), ...args]",
inputs=[txt2img_gallery] + txt2img_fields,
outputs=[init_img_with_mask] + img2img_fields,
)
img2img_send_to_img2img.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_img2img",
inputs=[img2img_gallery],
outputs=[init_img],
)
img2img_send_to_inpaint.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_inpaint",
inputs=[img2img_gallery],
outputs=[init_img_with_mask],
)
send_to_extras.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_extras",
inputs=[txt2img_gallery],
outputs=[extras_image],
)
open_txt2img_folder.click(
fn=lambda: open_folder(opts.outdir_samples or opts.outdir_txt2img_samples),
inputs=[],
outputs=[],
)
open_img2img_folder.click(
fn=lambda: open_folder(opts.outdir_samples or opts.outdir_img2img_samples),
inputs=[],
outputs=[],
)
open_extras_folder.click(
fn=lambda: open_folder(opts.outdir_samples or opts.outdir_extras_samples),
inputs=[],
outputs=[],
)
img2img_send_to_extras.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_extras",
inputs=[img2img_gallery],
outputs=[extras_image],
)
modules.generation_parameters_copypaste.connect_paste(pnginfo_send_to_txt2img, txt2img_paste_fields, generation_info, 'switch_to_txt2img')
modules.generation_parameters_copypaste.connect_paste(pnginfo_send_to_img2img, img2img_paste_fields, generation_info, 'switch_to_img2img_img2img')
ui_config_file = cmd_opts.ui_config_file
ui_settings = {}
settings_count = len(ui_settings)
error_loading = False
try:
if os.path.exists(ui_config_file):
with open(ui_config_file, "r", encoding="utf8") as file:
ui_settings = json.load(file)
except Exception:
error_loading = True
print("Error loading settings:", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
def loadsave(path, x):
def apply_field(obj, field, condition=None):
key = path + "/" + field
if getattr(obj,'custom_script_source',None) is not None:
key = 'customscript/' + obj.custom_script_source + '/' + key
if getattr(obj, 'do_not_save_to_config', False):
return
saved_value = ui_settings.get(key, None)
if saved_value is None:
ui_settings[key] = getattr(obj, field)
elif condition is None or condition(saved_value):
setattr(obj, field, saved_value)
if type(x) in [gr.Slider, gr.Radio, gr.Checkbox, gr.Textbox, gr.Number] and x.visible:
apply_field(x, 'visible')
if type(x) == gr.Slider:
apply_field(x, 'value')
apply_field(x, 'minimum')
apply_field(x, 'maximum')
apply_field(x, 'step')
if type(x) == gr.Radio:
apply_field(x, 'value', lambda val: val in x.choices)
if type(x) == gr.Checkbox:
apply_field(x, 'value')
if type(x) == gr.Textbox:
apply_field(x, 'value')
if type(x) == gr.Number:
apply_field(x, 'value')
visit(txt2img_interface, loadsave, "txt2img")
visit(img2img_interface, loadsave, "img2img")
visit(extras_interface, loadsave, "extras")
if not error_loading and (not os.path.exists(ui_config_file) or settings_count != len(ui_settings)):
with open(ui_config_file, "w", encoding="utf8") as file:
json.dump(ui_settings, file, indent=4)
return demo
with open(os.path.join(script_path, "script.js"), "r", encoding="utf8") as jsfile:
javascript = f'<script>{jsfile.read()}</script>'
jsdir = os.path.join(script_path, "javascript")
for filename in sorted(os.listdir(jsdir)):
with open(os.path.join(jsdir, filename), "r", encoding="utf8") as jsfile:
javascript += f"\n<script>{jsfile.read()}</script>"
if 'gradio_routes_templates_response' not in globals():
def template_response(*args, **kwargs):
res = gradio_routes_templates_response(*args, **kwargs)
res.body = res.body.replace(b'</head>', f'{javascript}</head>'.encode("utf8"))
res.init_headers()
return res
gradio_routes_templates_response = gradio.routes.templates.TemplateResponse
gradio.routes.templates.TemplateResponse = template_response
<|code_end|>
|
Keep -1 for seeds on X/Y plot checkbox isn't respected.
**Describe the bug**
Keep -1 for seeds on X/Y plot checkbox isn't respected
**To Reproduce**
Steps to reproduce the behavior:
1. txt2img
2. use Script, X/Y Plot, enter values, check "Keep -1 for seeds on X/Y plot"
3. Generate
4. See that the seeds are all the same on both X and Y
**Expected behavior**
New seed for each image generated
**Desktop (please complete the following information):**
- OS: Windows
- Browser: Edge
- ca3e5519e8b6dc020c5e7ae508738afb5dc6f3ec
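For reference, the change in the updated scripts/xy_grid.py below resolves this by only calling modules.processing.fix_seed(p) when the checkbox is unchecked; without that gate, fix_seed converts the -1 placeholder into a single concrete seed before any grid cell is generated, so every cell reuses it. A minimal sketch of the gating, as it appears at the top of the updated run():

    def run(self, p, x_type, x_values, y_type, y_values, draw_legend, no_fixed_seeds):
        # keep -1 untouched when requested, so each image draws its own random seed
        if not no_fixed_seeds:
            modules.processing.fix_seed(p)
        p.batch_size = 1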
|
scripts/xy_grid.py
<|code_start|>from collections import namedtuple
from copy import copy
from itertools import permutations, chain
import random
import csv
from io import StringIO
from PIL import Image
import numpy as np
import modules.scripts as scripts
import gradio as gr
from modules import images
from modules.processing import process_images, Processed
from modules.shared import opts, cmd_opts, state
import modules.shared as shared
import modules.sd_samplers
import modules.sd_models
import re
def apply_field(field):
def fun(p, x, xs):
setattr(p, field, x)
return fun
def apply_prompt(p, x, xs):
p.prompt = p.prompt.replace(xs[0], x)
p.negative_prompt = p.negative_prompt.replace(xs[0], x)
def apply_order(p, x, xs):
token_order = []
    # Initially grab the tokens from the prompt, so they can be replaced in order of earliest seen
for token in x:
token_order.append((p.prompt.find(token), token))
token_order.sort(key=lambda t: t[0])
prompt_parts = []
# Split the prompt up, taking out the tokens
for _, token in token_order:
n = p.prompt.find(token)
prompt_parts.append(p.prompt[0:n])
p.prompt = p.prompt[n + len(token):]
# Rebuild the prompt with the tokens in the order we want
prompt_tmp = ""
for idx, part in enumerate(prompt_parts):
prompt_tmp += part
prompt_tmp += x[idx]
p.prompt = prompt_tmp + p.prompt
samplers_dict = {}
for i, sampler in enumerate(modules.sd_samplers.samplers):
samplers_dict[sampler.name.lower()] = i
for alias in sampler.aliases:
samplers_dict[alias.lower()] = i
def apply_sampler(p, x, xs):
sampler_index = samplers_dict.get(x.lower(), None)
if sampler_index is None:
raise RuntimeError(f"Unknown sampler: {x}")
p.sampler_index = sampler_index
def apply_checkpoint(p, x, xs):
info = modules.sd_models.get_closet_checkpoint_match(x)
assert info is not None, f'Checkpoint for {x} not found'
modules.sd_models.reload_model_weights(shared.sd_model, info)
def format_value_add_label(p, opt, x):
if type(x) == float:
x = round(x, 8)
return f"{opt.label}: {x}"
def format_value(p, opt, x):
if type(x) == float:
x = round(x, 8)
return x
def format_value_join_list(p, opt, x):
return ", ".join(x)
def do_nothing(p, x, xs):
pass
def format_nothing(p, opt, x):
return ""
def str_permutations(x):
"""dummy function for specifying it in AxisOption's type when you want to get a list of permutations"""
return x
AxisOption = namedtuple("AxisOption", ["label", "type", "apply", "format_value"])
AxisOptionImg2Img = namedtuple("AxisOptionImg2Img", ["label", "type", "apply", "format_value"])
axis_options = [
AxisOption("Nothing", str, do_nothing, format_nothing),
AxisOption("Seed", int, apply_field("seed"), format_value_add_label),
AxisOption("Var. seed", int, apply_field("subseed"), format_value_add_label),
AxisOption("Var. strength", float, apply_field("subseed_strength"), format_value_add_label),
AxisOption("Steps", int, apply_field("steps"), format_value_add_label),
AxisOption("CFG Scale", float, apply_field("cfg_scale"), format_value_add_label),
AxisOption("Prompt S/R", str, apply_prompt, format_value),
AxisOption("Prompt order", str_permutations, apply_order, format_value_join_list),
AxisOption("Sampler", str, apply_sampler, format_value),
AxisOption("Checkpoint name", str, apply_checkpoint, format_value),
AxisOption("Sigma Churn", float, apply_field("s_churn"), format_value_add_label),
AxisOption("Sigma min", float, apply_field("s_tmin"), format_value_add_label),
AxisOption("Sigma max", float, apply_field("s_tmax"), format_value_add_label),
AxisOption("Sigma noise", float, apply_field("s_noise"), format_value_add_label),
AxisOption("Eta", float, apply_field("eta"), format_value_add_label),
AxisOptionImg2Img("Denoising", float, apply_field("denoising_strength"), format_value_add_label), # as it is now all AxisOptionImg2Img items must go after AxisOption ones
]
def draw_xy_grid(p, xs, ys, x_labels, y_labels, cell, draw_legend):
res = []
ver_texts = [[images.GridAnnotation(y)] for y in y_labels]
hor_texts = [[images.GridAnnotation(x)] for x in x_labels]
first_pocessed = None
state.job_count = len(xs) * len(ys) * p.n_iter
for iy, y in enumerate(ys):
for ix, x in enumerate(xs):
state.job = f"{ix + iy * len(xs) + 1} out of {len(xs) * len(ys)}"
processed = cell(x, y)
if first_pocessed is None:
first_pocessed = processed
try:
res.append(processed.images[0])
except:
res.append(Image.new(res[0].mode, res[0].size))
grid = images.image_grid(res, rows=len(ys))
if draw_legend:
grid = images.draw_grid_annotations(grid, res[0].width, res[0].height, hor_texts, ver_texts)
first_pocessed.images = [grid]
return first_pocessed
re_range = re.compile(r"\s*([+-]?\s*\d+)\s*-\s*([+-]?\s*\d+)(?:\s*\(([+-]\d+)\s*\))?\s*")
re_range_float = re.compile(r"\s*([+-]?\s*\d+(?:.\d*)?)\s*-\s*([+-]?\s*\d+(?:.\d*)?)(?:\s*\(([+-]\d+(?:.\d*)?)\s*\))?\s*")
re_range_count = re.compile(r"\s*([+-]?\s*\d+)\s*-\s*([+-]?\s*\d+)(?:\s*\[(\d+)\s*\])?\s*")
re_range_count_float = re.compile(r"\s*([+-]?\s*\d+(?:.\d*)?)\s*-\s*([+-]?\s*\d+(?:.\d*)?)(?:\s*\[(\d+(?:.\d*)?)\s*\])?\s*")
class Script(scripts.Script):
def title(self):
return "X/Y plot"
def ui(self, is_img2img):
current_axis_options = [x for x in axis_options if type(x) == AxisOption or type(x) == AxisOptionImg2Img and is_img2img]
with gr.Row():
x_type = gr.Dropdown(label="X type", choices=[x.label for x in current_axis_options], value=current_axis_options[1].label, visible=False, type="index", elem_id="x_type")
x_values = gr.Textbox(label="X values", visible=False, lines=1)
with gr.Row():
y_type = gr.Dropdown(label="Y type", choices=[x.label for x in current_axis_options], value=current_axis_options[4].label, visible=False, type="index", elem_id="y_type")
y_values = gr.Textbox(label="Y values", visible=False, lines=1)
draw_legend = gr.Checkbox(label='Draw legend', value=True)
no_fixed_seeds = gr.Checkbox(label='Keep -1 for seeds', value=False)
return [x_type, x_values, y_type, y_values, draw_legend, no_fixed_seeds]
def run(self, p, x_type, x_values, y_type, y_values, draw_legend, no_fixed_seeds):
modules.processing.fix_seed(p)
p.batch_size = 1
def process_axis(opt, vals):
if opt.label == 'Nothing':
return [0]
valslist = [x.strip() for x in chain.from_iterable(csv.reader(StringIO(vals)))]
if opt.type == int:
valslist_ext = []
for val in valslist:
m = re_range.fullmatch(val)
mc = re_range_count.fullmatch(val)
if m is not None:
start = int(m.group(1))
end = int(m.group(2))+1
step = int(m.group(3)) if m.group(3) is not None else 1
valslist_ext += list(range(start, end, step))
elif mc is not None:
start = int(mc.group(1))
end = int(mc.group(2))
num = int(mc.group(3)) if mc.group(3) is not None else 1
valslist_ext += [int(x) for x in np.linspace(start=start, stop=end, num=num).tolist()]
else:
valslist_ext.append(val)
valslist = valslist_ext
elif opt.type == float:
valslist_ext = []
for val in valslist:
m = re_range_float.fullmatch(val)
mc = re_range_count_float.fullmatch(val)
if m is not None:
start = float(m.group(1))
end = float(m.group(2))
step = float(m.group(3)) if m.group(3) is not None else 1
valslist_ext += np.arange(start, end + step, step).tolist()
elif mc is not None:
start = float(mc.group(1))
end = float(mc.group(2))
num = int(mc.group(3)) if mc.group(3) is not None else 1
valslist_ext += np.linspace(start=start, stop=end, num=num).tolist()
else:
valslist_ext.append(val)
valslist = valslist_ext
elif opt.type == str_permutations:
valslist = list(permutations(valslist))
valslist = [opt.type(x) for x in valslist]
return valslist
x_opt = axis_options[x_type]
xs = process_axis(x_opt, x_values)
y_opt = axis_options[y_type]
ys = process_axis(y_opt, y_values)
def fix_axis_seeds(axis_opt, axis_list):
if axis_opt.label == 'Seed':
return [int(random.randrange(4294967294)) if val is None or val == '' or val == -1 else val for val in axis_list]
else:
return axis_list
if not no_fixed_seeds:
xs = fix_axis_seeds(x_opt, xs)
ys = fix_axis_seeds(y_opt, ys)
if x_opt.label == 'Steps':
total_steps = sum(xs) * len(ys)
elif y_opt.label == 'Steps':
total_steps = sum(ys) * len(xs)
else:
total_steps = p.steps * len(xs) * len(ys)
print(f"X/Y plot will create {len(xs) * len(ys) * p.n_iter} images on a {len(xs)}x{len(ys)} grid. (Total steps to process: {total_steps * p.n_iter})")
shared.total_tqdm.updateTotal(total_steps * p.n_iter)
def cell(x, y):
pc = copy(p)
x_opt.apply(pc, x, xs)
y_opt.apply(pc, y, ys)
return process_images(pc)
processed = draw_xy_grid(
p,
xs=xs,
ys=ys,
x_labels=[x_opt.format_value(p, x_opt, x) for x in xs],
y_labels=[y_opt.format_value(p, y_opt, y) for y in ys],
cell=cell,
draw_legend=draw_legend
)
if opts.grid_save:
images.save_image(processed.images[0], p.outpath_grids, "xy_grid", prompt=p.prompt, seed=processed.seed, grid=True, p=p)
# restore checkpoint in case it was changed by axes
modules.sd_models.reload_model_weights(shared.sd_model)
return processed
<|code_end|>
|
scripts/xy_grid.py
<|code_start|>from collections import namedtuple
from copy import copy
from itertools import permutations, chain
import random
import csv
from io import StringIO
from PIL import Image
import numpy as np
import modules.scripts as scripts
import gradio as gr
from modules import images
from modules.processing import process_images, Processed
from modules.shared import opts, cmd_opts, state
import modules.shared as shared
import modules.sd_samplers
import modules.sd_models
import re
def apply_field(field):
def fun(p, x, xs):
setattr(p, field, x)
return fun
def apply_prompt(p, x, xs):
p.prompt = p.prompt.replace(xs[0], x)
p.negative_prompt = p.negative_prompt.replace(xs[0], x)
def apply_order(p, x, xs):
token_order = []
    # Initially grab the tokens from the prompt, so they can be replaced in order of earliest seen
for token in x:
token_order.append((p.prompt.find(token), token))
token_order.sort(key=lambda t: t[0])
prompt_parts = []
# Split the prompt up, taking out the tokens
for _, token in token_order:
n = p.prompt.find(token)
prompt_parts.append(p.prompt[0:n])
p.prompt = p.prompt[n + len(token):]
# Rebuild the prompt with the tokens in the order we want
prompt_tmp = ""
for idx, part in enumerate(prompt_parts):
prompt_tmp += part
prompt_tmp += x[idx]
p.prompt = prompt_tmp + p.prompt
samplers_dict = {}
for i, sampler in enumerate(modules.sd_samplers.samplers):
samplers_dict[sampler.name.lower()] = i
for alias in sampler.aliases:
samplers_dict[alias.lower()] = i
def apply_sampler(p, x, xs):
sampler_index = samplers_dict.get(x.lower(), None)
if sampler_index is None:
raise RuntimeError(f"Unknown sampler: {x}")
p.sampler_index = sampler_index
def apply_checkpoint(p, x, xs):
info = modules.sd_models.get_closet_checkpoint_match(x)
assert info is not None, f'Checkpoint for {x} not found'
modules.sd_models.reload_model_weights(shared.sd_model, info)
def format_value_add_label(p, opt, x):
if type(x) == float:
x = round(x, 8)
return f"{opt.label}: {x}"
def format_value(p, opt, x):
if type(x) == float:
x = round(x, 8)
return x
def format_value_join_list(p, opt, x):
return ", ".join(x)
def do_nothing(p, x, xs):
pass
def format_nothing(p, opt, x):
return ""
def str_permutations(x):
"""dummy function for specifying it in AxisOption's type when you want to get a list of permutations"""
return x
AxisOption = namedtuple("AxisOption", ["label", "type", "apply", "format_value"])
AxisOptionImg2Img = namedtuple("AxisOptionImg2Img", ["label", "type", "apply", "format_value"])
axis_options = [
AxisOption("Nothing", str, do_nothing, format_nothing),
AxisOption("Seed", int, apply_field("seed"), format_value_add_label),
AxisOption("Var. seed", int, apply_field("subseed"), format_value_add_label),
AxisOption("Var. strength", float, apply_field("subseed_strength"), format_value_add_label),
AxisOption("Steps", int, apply_field("steps"), format_value_add_label),
AxisOption("CFG Scale", float, apply_field("cfg_scale"), format_value_add_label),
AxisOption("Prompt S/R", str, apply_prompt, format_value),
AxisOption("Prompt order", str_permutations, apply_order, format_value_join_list),
AxisOption("Sampler", str, apply_sampler, format_value),
AxisOption("Checkpoint name", str, apply_checkpoint, format_value),
AxisOption("Sigma Churn", float, apply_field("s_churn"), format_value_add_label),
AxisOption("Sigma min", float, apply_field("s_tmin"), format_value_add_label),
AxisOption("Sigma max", float, apply_field("s_tmax"), format_value_add_label),
AxisOption("Sigma noise", float, apply_field("s_noise"), format_value_add_label),
AxisOption("Eta", float, apply_field("eta"), format_value_add_label),
AxisOptionImg2Img("Denoising", float, apply_field("denoising_strength"), format_value_add_label), # as it is now all AxisOptionImg2Img items must go after AxisOption ones
]
def draw_xy_grid(p, xs, ys, x_labels, y_labels, cell, draw_legend):
res = []
ver_texts = [[images.GridAnnotation(y)] for y in y_labels]
hor_texts = [[images.GridAnnotation(x)] for x in x_labels]
first_pocessed = None
state.job_count = len(xs) * len(ys) * p.n_iter
for iy, y in enumerate(ys):
for ix, x in enumerate(xs):
state.job = f"{ix + iy * len(xs) + 1} out of {len(xs) * len(ys)}"
processed = cell(x, y)
if first_pocessed is None:
first_pocessed = processed
try:
res.append(processed.images[0])
except:
res.append(Image.new(res[0].mode, res[0].size))
grid = images.image_grid(res, rows=len(ys))
if draw_legend:
grid = images.draw_grid_annotations(grid, res[0].width, res[0].height, hor_texts, ver_texts)
first_pocessed.images = [grid]
return first_pocessed
re_range = re.compile(r"\s*([+-]?\s*\d+)\s*-\s*([+-]?\s*\d+)(?:\s*\(([+-]\d+)\s*\))?\s*")
re_range_float = re.compile(r"\s*([+-]?\s*\d+(?:.\d*)?)\s*-\s*([+-]?\s*\d+(?:.\d*)?)(?:\s*\(([+-]\d+(?:.\d*)?)\s*\))?\s*")
re_range_count = re.compile(r"\s*([+-]?\s*\d+)\s*-\s*([+-]?\s*\d+)(?:\s*\[(\d+)\s*\])?\s*")
re_range_count_float = re.compile(r"\s*([+-]?\s*\d+(?:.\d*)?)\s*-\s*([+-]?\s*\d+(?:.\d*)?)(?:\s*\[(\d+(?:.\d*)?)\s*\])?\s*")
class Script(scripts.Script):
def title(self):
return "X/Y plot"
def ui(self, is_img2img):
        current_axis_options = [x for x in axis_options if type(x) == AxisOption or (type(x) == AxisOptionImg2Img and is_img2img)]
with gr.Row():
x_type = gr.Dropdown(label="X type", choices=[x.label for x in current_axis_options], value=current_axis_options[1].label, visible=False, type="index", elem_id="x_type")
x_values = gr.Textbox(label="X values", visible=False, lines=1)
with gr.Row():
y_type = gr.Dropdown(label="Y type", choices=[x.label for x in current_axis_options], value=current_axis_options[4].label, visible=False, type="index", elem_id="y_type")
y_values = gr.Textbox(label="Y values", visible=False, lines=1)
draw_legend = gr.Checkbox(label='Draw legend', value=True)
no_fixed_seeds = gr.Checkbox(label='Keep -1 for seeds', value=False)
return [x_type, x_values, y_type, y_values, draw_legend, no_fixed_seeds]
def run(self, p, x_type, x_values, y_type, y_values, draw_legend, no_fixed_seeds):
if not no_fixed_seeds:
modules.processing.fix_seed(p)
p.batch_size = 1
def process_axis(opt, vals):
if opt.label == 'Nothing':
return [0]
valslist = [x.strip() for x in chain.from_iterable(csv.reader(StringIO(vals)))]
if opt.type == int:
valslist_ext = []
for val in valslist:
m = re_range.fullmatch(val)
mc = re_range_count.fullmatch(val)
if m is not None:
start = int(m.group(1))
end = int(m.group(2))+1
step = int(m.group(3)) if m.group(3) is not None else 1
valslist_ext += list(range(start, end, step))
elif mc is not None:
start = int(mc.group(1))
end = int(mc.group(2))
num = int(mc.group(3)) if mc.group(3) is not None else 1
valslist_ext += [int(x) for x in np.linspace(start=start, stop=end, num=num).tolist()]
else:
valslist_ext.append(val)
valslist = valslist_ext
elif opt.type == float:
valslist_ext = []
for val in valslist:
m = re_range_float.fullmatch(val)
mc = re_range_count_float.fullmatch(val)
if m is not None:
start = float(m.group(1))
end = float(m.group(2))
step = float(m.group(3)) if m.group(3) is not None else 1
valslist_ext += np.arange(start, end + step, step).tolist()
elif mc is not None:
start = float(mc.group(1))
end = float(mc.group(2))
num = int(mc.group(3)) if mc.group(3) is not None else 1
valslist_ext += np.linspace(start=start, stop=end, num=num).tolist()
else:
valslist_ext.append(val)
valslist = valslist_ext
elif opt.type == str_permutations:
valslist = list(permutations(valslist))
valslist = [opt.type(x) for x in valslist]
return valslist
x_opt = axis_options[x_type]
xs = process_axis(x_opt, x_values)
y_opt = axis_options[y_type]
ys = process_axis(y_opt, y_values)
def fix_axis_seeds(axis_opt, axis_list):
if axis_opt.label == 'Seed':
return [int(random.randrange(4294967294)) if val is None or val == '' or val == -1 else val for val in axis_list]
else:
return axis_list
if not no_fixed_seeds:
xs = fix_axis_seeds(x_opt, xs)
ys = fix_axis_seeds(y_opt, ys)
if x_opt.label == 'Steps':
total_steps = sum(xs) * len(ys)
elif y_opt.label == 'Steps':
total_steps = sum(ys) * len(xs)
else:
total_steps = p.steps * len(xs) * len(ys)
print(f"X/Y plot will create {len(xs) * len(ys) * p.n_iter} images on a {len(xs)}x{len(ys)} grid. (Total steps to process: {total_steps * p.n_iter})")
shared.total_tqdm.updateTotal(total_steps * p.n_iter)
def cell(x, y):
pc = copy(p)
x_opt.apply(pc, x, xs)
y_opt.apply(pc, y, ys)
return process_images(pc)
processed = draw_xy_grid(
p,
xs=xs,
ys=ys,
x_labels=[x_opt.format_value(p, x_opt, x) for x in xs],
y_labels=[y_opt.format_value(p, y_opt, y) for y in ys],
cell=cell,
draw_legend=draw_legend
)
if opts.grid_save:
images.save_image(processed.images[0], p.outpath_grids, "xy_grid", prompt=p.prompt, seed=processed.seed, grid=True, p=p)
# restore checkpoint in case it was changed by axes
modules.sd_models.reload_model_weights(shared.sd_model)
return processed
<|code_end|>
|
(Feature Request) Add model/vae/hypernetwork file name to be saved in image info
Add the selected model / VAE / hypernetwork file name to the image info that is saved with each image.
Maybe make it optional.
I do not know what I changed in the settings tab, but now I can't recreate the same image again.
I have a few models and hypernetworks that I use for tests.
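A minimal sketch of what this could look like, assuming the extra names get merged into the generation_params dict that create_infotext() builds in modules/processing.py below. The helper name, the "Model"/"Hypernet"/"VAE" keys, and the shared.loaded_vae_file attribute are illustrative assumptions rather than the project's actual change; the checkpoint name reuses the CheckpointInfo.model_name field that load_model_weights() already attaches to the loaded model.
<|code_start|># Hypothetical sketch only: one way to collect the requested names for the image info.
# The dict returned here could be merged into generation_params inside create_infotext().
import os
import modules.shared as shared
def extra_model_info_params():
    params = {}
    # Checkpoint name: load_model_weights() stores the CheckpointInfo on the model,
    # and that namedtuple already carries a human-readable model_name field.
    ckpt_info = getattr(shared.sd_model, 'sd_checkpoint_info', None)
    if ckpt_info is not None:
        params["Model"] = ckpt_info.model_name
    # Hypernetwork name: assumes the loaded hypernetwork object exposes a .name attribute.
    if shared.loaded_hypernetwork is not None:
        params["Hypernet"] = getattr(shared.loaded_hypernetwork, 'name', None)
    # VAE name: would require load_model_weights() to remember which .vae.pt it loaded,
    # e.g. in a hypothetical shared.loaded_vae_file attribute.
    vae_file = getattr(shared, 'loaded_vae_file', None)
    if vae_file is not None:
        params["VAE"] = os.path.basename(vae_file)
    return {k: v for k, v in params.items() if v is not None}<|code_end|>
To keep it optional, create_infotext() could call generation_params.update(extra_model_info_params()) behind a new checkbox option registered in modules/shared.py, similar to the existing add_model_hash_to_info setting.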
|
modules/processing.py
<|code_start|>import json
import math
import os
import sys
import torch
import numpy as np
from PIL import Image, ImageFilter, ImageOps
import random
import cv2
from skimage import exposure
import modules.sd_hijack
from modules import devices, prompt_parser, masking, sd_samplers, lowvram
from modules.sd_hijack import model_hijack
from modules.shared import opts, cmd_opts, state
import modules.shared as shared
import modules.face_restoration
import modules.images as images
import modules.styles
import logging
# some of those options should not be changed at all because they would break the model, so I removed them from options.
opt_C = 4
opt_f = 8
def setup_color_correction(image):
logging.info("Calibrating color correction.")
correction_target = cv2.cvtColor(np.asarray(image.copy()), cv2.COLOR_RGB2LAB)
return correction_target
def apply_color_correction(correction, image):
logging.info("Applying color correction.")
image = Image.fromarray(cv2.cvtColor(exposure.match_histograms(
cv2.cvtColor(
np.asarray(image),
cv2.COLOR_RGB2LAB
),
correction,
channel_axis=2
), cv2.COLOR_LAB2RGB).astype("uint8"))
return image
def get_correct_sampler(p):
if isinstance(p, modules.processing.StableDiffusionProcessingTxt2Img):
return sd_samplers.samplers
elif isinstance(p, modules.processing.StableDiffusionProcessingImg2Img):
return sd_samplers.samplers_for_img2img
class StableDiffusionProcessing:
def __init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt="", styles=None, seed=-1, subseed=-1, subseed_strength=0, seed_resize_from_h=-1, seed_resize_from_w=-1, seed_enable_extras=True, sampler_index=0, batch_size=1, n_iter=1, steps=50, cfg_scale=7.0, width=512, height=512, restore_faces=False, tiling=False, do_not_save_samples=False, do_not_save_grid=False, extra_generation_params=None, overlay_images=None, negative_prompt=None, eta=None):
self.sd_model = sd_model
self.outpath_samples: str = outpath_samples
self.outpath_grids: str = outpath_grids
self.prompt: str = prompt
self.prompt_for_display: str = None
self.negative_prompt: str = (negative_prompt or "")
self.styles: list = styles or []
self.seed: int = seed
self.subseed: int = subseed
self.subseed_strength: float = subseed_strength
self.seed_resize_from_h: int = seed_resize_from_h
self.seed_resize_from_w: int = seed_resize_from_w
self.sampler_index: int = sampler_index
self.batch_size: int = batch_size
self.n_iter: int = n_iter
self.steps: int = steps
self.cfg_scale: float = cfg_scale
self.width: int = width
self.height: int = height
self.restore_faces: bool = restore_faces
self.tiling: bool = tiling
self.do_not_save_samples: bool = do_not_save_samples
self.do_not_save_grid: bool = do_not_save_grid
self.extra_generation_params: dict = extra_generation_params or {}
self.overlay_images = overlay_images
self.eta = eta
self.paste_to = None
self.color_corrections = None
self.denoising_strength: float = 0
self.sampler_noise_scheduler_override = None
self.ddim_discretize = opts.ddim_discretize
self.s_churn = opts.s_churn
self.s_tmin = opts.s_tmin
self.s_tmax = float('inf') # not representable as a standard ui option
self.s_noise = opts.s_noise
if not seed_enable_extras:
self.subseed = -1
self.subseed_strength = 0
self.seed_resize_from_h = 0
self.seed_resize_from_w = 0
def init(self, all_prompts, all_seeds, all_subseeds):
pass
def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength):
raise NotImplementedError()
class Processed:
def __init__(self, p: StableDiffusionProcessing, images_list, seed=-1, info="", subseed=None, all_prompts=None, all_seeds=None, all_subseeds=None, index_of_first_image=0, infotexts=None):
self.images = images_list
self.prompt = p.prompt
self.negative_prompt = p.negative_prompt
self.seed = seed
self.subseed = subseed
self.subseed_strength = p.subseed_strength
self.info = info
self.width = p.width
self.height = p.height
self.sampler_index = p.sampler_index
self.sampler = sd_samplers.samplers[p.sampler_index].name
self.cfg_scale = p.cfg_scale
self.steps = p.steps
self.batch_size = p.batch_size
self.restore_faces = p.restore_faces
self.face_restoration_model = opts.face_restoration_model if p.restore_faces else None
self.sd_model_hash = shared.sd_model.sd_model_hash
self.seed_resize_from_w = p.seed_resize_from_w
self.seed_resize_from_h = p.seed_resize_from_h
self.denoising_strength = getattr(p, 'denoising_strength', None)
self.extra_generation_params = p.extra_generation_params
self.index_of_first_image = index_of_first_image
self.styles = p.styles
self.job_timestamp = state.job_timestamp
self.clip_skip = opts.CLIP_ignore_last_layers
self.eta = p.eta
self.ddim_discretize = p.ddim_discretize
self.s_churn = p.s_churn
self.s_tmin = p.s_tmin
self.s_tmax = p.s_tmax
self.s_noise = p.s_noise
self.sampler_noise_scheduler_override = p.sampler_noise_scheduler_override
self.prompt = self.prompt if type(self.prompt) != list else self.prompt[0]
self.negative_prompt = self.negative_prompt if type(self.negative_prompt) != list else self.negative_prompt[0]
self.seed = int(self.seed if type(self.seed) != list else self.seed[0])
self.subseed = int(self.subseed if type(self.subseed) != list else self.subseed[0]) if self.subseed is not None else -1
self.all_prompts = all_prompts or [self.prompt]
self.all_seeds = all_seeds or [self.seed]
self.all_subseeds = all_subseeds or [self.subseed]
self.infotexts = infotexts or [info]
def js(self):
obj = {
"prompt": self.prompt,
"all_prompts": self.all_prompts,
"negative_prompt": self.negative_prompt,
"seed": self.seed,
"all_seeds": self.all_seeds,
"subseed": self.subseed,
"all_subseeds": self.all_subseeds,
"subseed_strength": self.subseed_strength,
"width": self.width,
"height": self.height,
"sampler_index": self.sampler_index,
"sampler": self.sampler,
"cfg_scale": self.cfg_scale,
"steps": self.steps,
"batch_size": self.batch_size,
"restore_faces": self.restore_faces,
"face_restoration_model": self.face_restoration_model,
"sd_model_hash": self.sd_model_hash,
"seed_resize_from_w": self.seed_resize_from_w,
"seed_resize_from_h": self.seed_resize_from_h,
"denoising_strength": self.denoising_strength,
"extra_generation_params": self.extra_generation_params,
"index_of_first_image": self.index_of_first_image,
"infotexts": self.infotexts,
"styles": self.styles,
"job_timestamp": self.job_timestamp,
"clip_skip": self.clip_skip,
}
return json.dumps(obj)
def infotext(self, p: StableDiffusionProcessing, index):
return create_infotext(p, self.all_prompts, self.all_seeds, self.all_subseeds, comments=[], position_in_batch=index % self.batch_size, iteration=index // self.batch_size)
# from https://discuss.pytorch.org/t/help-regarding-slerp-function-for-generative-model-sampling/32475/3
def slerp(val, low, high):
low_norm = low/torch.norm(low, dim=1, keepdim=True)
high_norm = high/torch.norm(high, dim=1, keepdim=True)
dot = (low_norm*high_norm).sum(1)
if dot.mean() > 0.9995:
return low * val + high * (1 - val)
omega = torch.acos(dot)
so = torch.sin(omega)
res = (torch.sin((1.0-val)*omega)/so).unsqueeze(1)*low + (torch.sin(val*omega)/so).unsqueeze(1) * high
return res
def create_random_tensors(shape, seeds, subseeds=None, subseed_strength=0.0, seed_resize_from_h=0, seed_resize_from_w=0, p=None):
xs = []
# if we have multiple seeds, this means we are working with batch size>1; this then
# enables the generation of additional tensors with noise that the sampler will use during its processing.
# Using those pre-generated tensors instead of simple torch.randn allows a batch with seeds [100, 101] to
# produce the same images as with two batches [100], [101].
if p is not None and p.sampler is not None and len(seeds) > 1 and opts.enable_batch_seeds:
sampler_noises = [[] for _ in range(p.sampler.number_of_needed_noises(p))]
else:
sampler_noises = None
for i, seed in enumerate(seeds):
noise_shape = shape if seed_resize_from_h <= 0 or seed_resize_from_w <= 0 else (shape[0], seed_resize_from_h//8, seed_resize_from_w//8)
subnoise = None
if subseeds is not None:
subseed = 0 if i >= len(subseeds) else subseeds[i]
subnoise = devices.randn(subseed, noise_shape)
# randn results depend on device; gpu and cpu get different results for same seed;
# the way I see it, it's better to do this on CPU, so that everyone gets same result;
# but the original script had it like this, so I do not dare change it for now because
# it will break everyone's seeds.
noise = devices.randn(seed, noise_shape)
if subnoise is not None:
noise = slerp(subseed_strength, noise, subnoise)
if noise_shape != shape:
x = devices.randn(seed, shape)
dx = (shape[2] - noise_shape[2]) // 2
dy = (shape[1] - noise_shape[1]) // 2
w = noise_shape[2] if dx >= 0 else noise_shape[2] + 2 * dx
h = noise_shape[1] if dy >= 0 else noise_shape[1] + 2 * dy
tx = 0 if dx < 0 else dx
ty = 0 if dy < 0 else dy
dx = max(-dx, 0)
dy = max(-dy, 0)
x[:, ty:ty+h, tx:tx+w] = noise[:, dy:dy+h, dx:dx+w]
noise = x
if sampler_noises is not None:
cnt = p.sampler.number_of_needed_noises(p)
for j in range(cnt):
sampler_noises[j].append(devices.randn_without_seed(tuple(noise_shape)))
xs.append(noise)
if sampler_noises is not None:
p.sampler.sampler_noises = [torch.stack(n).to(shared.device) for n in sampler_noises]
x = torch.stack(xs).to(shared.device)
return x
def get_fixed_seed(seed):
if seed is None or seed == '' or seed == -1:
return int(random.randrange(4294967294))
return seed
def fix_seed(p):
p.seed = get_fixed_seed(p.seed)
p.subseed = get_fixed_seed(p.subseed)
def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments, iteration=0, position_in_batch=0):
index = position_in_batch + iteration * p.batch_size
clip_skip = getattr(p, 'clip_skip', opts.CLIP_ignore_last_layers)
generation_params = {
"Steps": p.steps,
"Sampler": get_correct_sampler(p)[p.sampler_index].name,
"CFG scale": p.cfg_scale,
"Seed": all_seeds[index],
"Face restoration": (opts.face_restoration_model if p.restore_faces else None),
"Size": f"{p.width}x{p.height}",
"Model hash": getattr(p, 'sd_model_hash', None if not opts.add_model_hash_to_info or not shared.sd_model.sd_model_hash else shared.sd_model.sd_model_hash),
"Batch size": (None if p.batch_size < 2 else p.batch_size),
"Batch pos": (None if p.batch_size < 2 else position_in_batch),
"Variation seed": (None if p.subseed_strength == 0 else all_subseeds[index]),
"Variation seed strength": (None if p.subseed_strength == 0 else p.subseed_strength),
"Seed resize from": (None if p.seed_resize_from_w == 0 or p.seed_resize_from_h == 0 else f"{p.seed_resize_from_w}x{p.seed_resize_from_h}"),
"Denoising strength": getattr(p, 'denoising_strength', None),
"Eta": (None if p.sampler is None or p.sampler.eta == p.sampler.default_eta else p.sampler.eta),
"Clip skip": None if clip_skip==0 else clip_skip,
}
generation_params.update(p.extra_generation_params)
generation_params_text = ", ".join([k if k == v else f'{k}: {v}' for k, v in generation_params.items() if v is not None])
negative_prompt_text = "\nNegative prompt: " + p.negative_prompt if p.negative_prompt else ""
return f"{all_prompts[index]}{negative_prompt_text}\n{generation_params_text}".strip()
def process_images(p: StableDiffusionProcessing) -> Processed:
"""this is the main loop that both txt2img and img2img use; it calls func_init once inside all the scopes and func_sample once per batch"""
if type(p.prompt) == list:
assert(len(p.prompt) > 0)
else:
assert p.prompt is not None
devices.torch_gc()
seed = get_fixed_seed(p.seed)
subseed = get_fixed_seed(p.subseed)
if p.outpath_samples is not None:
os.makedirs(p.outpath_samples, exist_ok=True)
if p.outpath_grids is not None:
os.makedirs(p.outpath_grids, exist_ok=True)
modules.sd_hijack.model_hijack.apply_circular(p.tiling)
modules.sd_hijack.model_hijack.clear_comments()
comments = {}
shared.prompt_styles.apply_styles(p)
if type(p.prompt) == list:
all_prompts = p.prompt
else:
all_prompts = p.batch_size * p.n_iter * [p.prompt]
if type(seed) == list:
all_seeds = seed
else:
all_seeds = [int(seed) + (x if p.subseed_strength == 0 else 0) for x in range(len(all_prompts))]
if type(subseed) == list:
all_subseeds = subseed
else:
all_subseeds = [int(subseed) + x for x in range(len(all_prompts))]
def infotext(iteration=0, position_in_batch=0):
return create_infotext(p, all_prompts, all_seeds, all_subseeds, comments, iteration, position_in_batch)
if os.path.exists(cmd_opts.embeddings_dir):
model_hijack.embedding_db.load_textual_inversion_embeddings()
infotexts = []
output_images = []
with torch.no_grad(), p.sd_model.ema_scope():
with devices.autocast():
p.init(all_prompts, all_seeds, all_subseeds)
if state.job_count == -1:
state.job_count = p.n_iter
for n in range(p.n_iter):
if state.skipped:
state.skipped = False
if state.interrupted:
break
prompts = all_prompts[n * p.batch_size:(n + 1) * p.batch_size]
seeds = all_seeds[n * p.batch_size:(n + 1) * p.batch_size]
subseeds = all_subseeds[n * p.batch_size:(n + 1) * p.batch_size]
if (len(prompts) == 0):
break
#uc = p.sd_model.get_learned_conditioning(len(prompts) * [p.negative_prompt])
#c = p.sd_model.get_learned_conditioning(prompts)
with devices.autocast():
uc = prompt_parser.get_learned_conditioning(shared.sd_model, len(prompts) * [p.negative_prompt], p.steps)
c = prompt_parser.get_multicond_learned_conditioning(shared.sd_model, prompts, p.steps)
if len(model_hijack.comments) > 0:
for comment in model_hijack.comments:
comments[comment] = 1
if p.n_iter > 1:
shared.state.job = f"Batch {n+1} out of {p.n_iter}"
with devices.autocast():
samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength)
if state.interrupted or state.skipped:
# if we are interrupted, sample returns just noise
# use the image collected previously in sampler loop
samples_ddim = shared.state.current_latent
samples_ddim = samples_ddim.to(devices.dtype)
x_samples_ddim = p.sd_model.decode_first_stage(samples_ddim)
x_samples_ddim = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)
del samples_ddim
if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
lowvram.send_everything_to_cpu()
devices.torch_gc()
if opts.filter_nsfw:
import modules.safety as safety
x_samples_ddim = modules.safety.censor_batch(x_samples_ddim)
for i, x_sample in enumerate(x_samples_ddim):
x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2)
x_sample = x_sample.astype(np.uint8)
if p.restore_faces:
if opts.save and not p.do_not_save_samples and opts.save_images_before_face_restoration:
images.save_image(Image.fromarray(x_sample), p.outpath_samples, "", seeds[i], prompts[i], opts.samples_format, info=infotext(n, i), p=p, suffix="-before-face-restoration")
devices.torch_gc()
x_sample = modules.face_restoration.restore_faces(x_sample)
devices.torch_gc()
image = Image.fromarray(x_sample)
if p.color_corrections is not None and i < len(p.color_corrections):
if opts.save and not p.do_not_save_samples and opts.save_images_before_color_correction:
images.save_image(image, p.outpath_samples, "", seeds[i], prompts[i], opts.samples_format, info=infotext(n, i), p=p, suffix="-before-color-correction")
image = apply_color_correction(p.color_corrections[i], image)
if p.overlay_images is not None and i < len(p.overlay_images):
overlay = p.overlay_images[i]
if p.paste_to is not None:
x, y, w, h = p.paste_to
base_image = Image.new('RGBA', (overlay.width, overlay.height))
image = images.resize_image(1, image, w, h)
base_image.paste(image, (x, y))
image = base_image
image = image.convert('RGBA')
image.alpha_composite(overlay)
image = image.convert('RGB')
if opts.samples_save and not p.do_not_save_samples:
images.save_image(image, p.outpath_samples, "", seeds[i], prompts[i], opts.samples_format, info=infotext(n, i), p=p)
text = infotext(n, i)
infotexts.append(text)
image.info["parameters"] = text
output_images.append(image)
del x_samples_ddim
devices.torch_gc()
state.nextjob()
p.color_corrections = None
index_of_first_image = 0
unwanted_grid_because_of_img_count = len(output_images) < 2 and opts.grid_only_if_multiple
if (opts.return_grid or opts.grid_save) and not p.do_not_save_grid and not unwanted_grid_because_of_img_count:
grid = images.image_grid(output_images, p.batch_size)
if opts.return_grid:
text = infotext()
infotexts.insert(0, text)
grid.info["parameters"] = text
output_images.insert(0, grid)
index_of_first_image = 1
if opts.grid_save:
images.save_image(grid, p.outpath_grids, "grid", all_seeds[0], all_prompts[0], opts.grid_format, info=infotext(), short_filename=not opts.grid_extended_filename, p=p, grid=True)
devices.torch_gc()
return Processed(p, output_images, all_seeds[0], infotext() + "".join(["\n\n" + x for x in comments]), subseed=all_subseeds[0], all_prompts=all_prompts, all_seeds=all_seeds, all_subseeds=all_subseeds, index_of_first_image=index_of_first_image, infotexts=infotexts)
class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
sampler = None
firstphase_width = 0
firstphase_height = 0
firstphase_width_truncated = 0
firstphase_height_truncated = 0
def __init__(self, enable_hr=False, scale_latent=True, denoising_strength=0.75, **kwargs):
super().__init__(**kwargs)
self.enable_hr = enable_hr
self.scale_latent = scale_latent
self.denoising_strength = denoising_strength
def init(self, all_prompts, all_seeds, all_subseeds):
if self.enable_hr:
if state.job_count == -1:
state.job_count = self.n_iter * 2
else:
state.job_count = state.job_count * 2
desired_pixel_count = 512 * 512
actual_pixel_count = self.width * self.height
scale = math.sqrt(desired_pixel_count / actual_pixel_count)
self.firstphase_width = math.ceil(scale * self.width / 64) * 64
self.firstphase_height = math.ceil(scale * self.height / 64) * 64
self.firstphase_width_truncated = int(scale * self.width)
self.firstphase_height_truncated = int(scale * self.height)
def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength):
self.sampler = sd_samplers.create_sampler_with_index(sd_samplers.samplers, self.sampler_index, self.sd_model)
if not self.enable_hr:
x = create_random_tensors([opt_C, self.height // opt_f, self.width // opt_f], seeds=seeds, subseeds=subseeds, subseed_strength=self.subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self)
samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning)
return samples
x = create_random_tensors([opt_C, self.firstphase_height // opt_f, self.firstphase_width // opt_f], seeds=seeds, subseeds=subseeds, subseed_strength=self.subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self)
samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning)
truncate_x = (self.firstphase_width - self.firstphase_width_truncated) // opt_f
truncate_y = (self.firstphase_height - self.firstphase_height_truncated) // opt_f
samples = samples[:, :, truncate_y//2:samples.shape[2]-truncate_y//2, truncate_x//2:samples.shape[3]-truncate_x//2]
if self.scale_latent:
samples = torch.nn.functional.interpolate(samples, size=(self.height // opt_f, self.width // opt_f), mode="bilinear")
else:
decoded_samples = self.sd_model.decode_first_stage(samples)
if opts.upscaler_for_img2img is None or opts.upscaler_for_img2img == "None":
decoded_samples = torch.nn.functional.interpolate(decoded_samples, size=(self.height, self.width), mode="bilinear")
else:
lowres_samples = torch.clamp((decoded_samples + 1.0) / 2.0, min=0.0, max=1.0)
batch_images = []
for i, x_sample in enumerate(lowres_samples):
x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2)
x_sample = x_sample.astype(np.uint8)
image = Image.fromarray(x_sample)
image = images.resize_image(0, image, self.width, self.height)
image = np.array(image).astype(np.float32) / 255.0
image = np.moveaxis(image, 2, 0)
batch_images.append(image)
decoded_samples = torch.from_numpy(np.array(batch_images))
decoded_samples = decoded_samples.to(shared.device)
decoded_samples = 2. * decoded_samples - 1.
samples = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(decoded_samples))
shared.state.nextjob()
self.sampler = sd_samplers.create_sampler_with_index(sd_samplers.samplers, self.sampler_index, self.sd_model)
noise = create_random_tensors(samples.shape[1:], seeds=seeds, subseeds=subseeds, subseed_strength=subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self)
# GC now before running the next img2img to prevent running out of memory
x = None
devices.torch_gc()
samples = self.sampler.sample_img2img(self, samples, noise, conditioning, unconditional_conditioning, steps=self.steps)
return samples
class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
sampler = None
def __init__(self, init_images=None, resize_mode=0, denoising_strength=0.75, mask=None, mask_blur=4, inpainting_fill=0, inpaint_full_res=True, inpaint_full_res_padding=0, inpainting_mask_invert=0, **kwargs):
super().__init__(**kwargs)
self.init_images = init_images
self.resize_mode: int = resize_mode
self.denoising_strength: float = denoising_strength
self.init_latent = None
self.image_mask = mask
#self.image_unblurred_mask = None
self.latent_mask = None
self.mask_for_overlay = None
self.mask_blur = mask_blur
self.inpainting_fill = inpainting_fill
self.inpaint_full_res = inpaint_full_res
self.inpaint_full_res_padding = inpaint_full_res_padding
self.inpainting_mask_invert = inpainting_mask_invert
self.mask = None
self.nmask = None
def init(self, all_prompts, all_seeds, all_subseeds):
self.sampler = sd_samplers.create_sampler_with_index(sd_samplers.samplers_for_img2img, self.sampler_index, self.sd_model)
crop_region = None
if self.image_mask is not None:
self.image_mask = self.image_mask.convert('L')
if self.inpainting_mask_invert:
self.image_mask = ImageOps.invert(self.image_mask)
#self.image_unblurred_mask = self.image_mask
if self.mask_blur > 0:
self.image_mask = self.image_mask.filter(ImageFilter.GaussianBlur(self.mask_blur))
if self.inpaint_full_res:
self.mask_for_overlay = self.image_mask
mask = self.image_mask.convert('L')
crop_region = masking.get_crop_region(np.array(mask), self.inpaint_full_res_padding)
crop_region = masking.expand_crop_region(crop_region, self.width, self.height, mask.width, mask.height)
x1, y1, x2, y2 = crop_region
mask = mask.crop(crop_region)
self.image_mask = images.resize_image(2, mask, self.width, self.height)
self.paste_to = (x1, y1, x2-x1, y2-y1)
else:
self.image_mask = images.resize_image(self.resize_mode, self.image_mask, self.width, self.height)
np_mask = np.array(self.image_mask)
np_mask = np.clip((np_mask.astype(np.float32)) * 2, 0, 255).astype(np.uint8)
self.mask_for_overlay = Image.fromarray(np_mask)
self.overlay_images = []
latent_mask = self.latent_mask if self.latent_mask is not None else self.image_mask
add_color_corrections = opts.img2img_color_correction and self.color_corrections is None
if add_color_corrections:
self.color_corrections = []
imgs = []
for img in self.init_images:
image = img.convert("RGB")
if crop_region is None:
image = images.resize_image(self.resize_mode, image, self.width, self.height)
if self.image_mask is not None:
image_masked = Image.new('RGBa', (image.width, image.height))
image_masked.paste(image.convert("RGBA").convert("RGBa"), mask=ImageOps.invert(self.mask_for_overlay.convert('L')))
self.overlay_images.append(image_masked.convert('RGBA'))
if crop_region is not None:
image = image.crop(crop_region)
image = images.resize_image(2, image, self.width, self.height)
if self.image_mask is not None:
if self.inpainting_fill != 1:
image = masking.fill(image, latent_mask)
if add_color_corrections:
self.color_corrections.append(setup_color_correction(image))
image = np.array(image).astype(np.float32) / 255.0
image = np.moveaxis(image, 2, 0)
imgs.append(image)
if len(imgs) == 1:
batch_images = np.expand_dims(imgs[0], axis=0).repeat(self.batch_size, axis=0)
if self.overlay_images is not None:
self.overlay_images = self.overlay_images * self.batch_size
elif len(imgs) <= self.batch_size:
self.batch_size = len(imgs)
batch_images = np.array(imgs)
else:
raise RuntimeError(f"bad number of images passed: {len(imgs)}; expecting {self.batch_size} or less")
image = torch.from_numpy(batch_images)
image = 2. * image - 1.
image = image.to(shared.device)
self.init_latent = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(image))
if self.image_mask is not None:
init_mask = latent_mask
latmask = init_mask.convert('RGB').resize((self.init_latent.shape[3], self.init_latent.shape[2]))
latmask = np.moveaxis(np.array(latmask, dtype=np.float32), 2, 0) / 255
latmask = latmask[0]
latmask = np.around(latmask)
latmask = np.tile(latmask[None], (4, 1, 1))
self.mask = torch.asarray(1.0 - latmask).to(shared.device).type(self.sd_model.dtype)
self.nmask = torch.asarray(latmask).to(shared.device).type(self.sd_model.dtype)
# this needs to be fixed to be done in sample() using actual seeds for batches
if self.inpainting_fill == 2:
self.init_latent = self.init_latent * self.mask + create_random_tensors(self.init_latent.shape[1:], all_seeds[0:self.init_latent.shape[0]]) * self.nmask
elif self.inpainting_fill == 3:
self.init_latent = self.init_latent * self.mask
def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength):
x = create_random_tensors([opt_C, self.height // opt_f, self.width // opt_f], seeds=seeds, subseeds=subseeds, subseed_strength=self.subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self)
samples = self.sampler.sample_img2img(self, self.init_latent, x, conditioning, unconditional_conditioning)
if self.mask is not None:
samples = samples * self.nmask + self.init_latent * self.mask
del x
devices.torch_gc()
return samples
<|code_end|>
modules/sd_models.py
<|code_start|>import glob
import os.path
import sys
from collections import namedtuple
import torch
from omegaconf import OmegaConf
from ldm.util import instantiate_from_config
from modules import shared, modelloader, devices
from modules.paths import models_path
model_dir = "Stable-diffusion"
model_path = os.path.abspath(os.path.join(models_path, model_dir))
CheckpointInfo = namedtuple("CheckpointInfo", ['filename', 'title', 'hash', 'model_name', 'config'])
checkpoints_list = {}
try:
# this silences the annoying "Some weights of the model checkpoint were not used when initializing..." message at start.
from transformers import logging
logging.set_verbosity_error()
except Exception:
pass
def setup_model():
if not os.path.exists(model_path):
os.makedirs(model_path)
list_models()
def checkpoint_tiles():
return sorted([x.title for x in checkpoints_list.values()])
def list_models():
checkpoints_list.clear()
model_list = modelloader.load_models(model_path=model_path, command_path=shared.cmd_opts.ckpt_dir, ext_filter=[".ckpt"])
def modeltitle(path, shorthash):
abspath = os.path.abspath(path)
if shared.cmd_opts.ckpt_dir is not None and abspath.startswith(shared.cmd_opts.ckpt_dir):
name = abspath.replace(shared.cmd_opts.ckpt_dir, '')
elif abspath.startswith(model_path):
name = abspath.replace(model_path, '')
else:
name = os.path.basename(path)
if name.startswith("\\") or name.startswith("/"):
name = name[1:]
shortname = os.path.splitext(name.replace("/", "_").replace("\\", "_"))[0]
return f'{name} [{shorthash}]', shortname
cmd_ckpt = shared.cmd_opts.ckpt
if os.path.exists(cmd_ckpt):
h = model_hash(cmd_ckpt)
title, short_model_name = modeltitle(cmd_ckpt, h)
checkpoints_list[title] = CheckpointInfo(cmd_ckpt, title, h, short_model_name, shared.cmd_opts.config)
shared.opts.data['sd_model_checkpoint'] = title
elif cmd_ckpt is not None and cmd_ckpt != shared.default_sd_model_file:
print(f"Checkpoint in --ckpt argument not found (Possible it was moved to {model_path}: {cmd_ckpt}", file=sys.stderr)
for filename in model_list:
h = model_hash(filename)
title, short_model_name = modeltitle(filename, h)
basename, _ = os.path.splitext(filename)
config = basename + ".yaml"
if not os.path.exists(config):
config = shared.cmd_opts.config
checkpoints_list[title] = CheckpointInfo(filename, title, h, short_model_name, config)
def get_closet_checkpoint_match(searchString):
applicable = sorted([info for info in checkpoints_list.values() if searchString in info.title], key = lambda x:len(x.title))
if len(applicable) > 0:
return applicable[0]
return None
def model_hash(filename):
try:
with open(filename, "rb") as file:
import hashlib
m = hashlib.sha256()
file.seek(0x100000)
m.update(file.read(0x10000))
return m.hexdigest()[0:8]
except FileNotFoundError:
return 'NOFILE'
def select_checkpoint():
model_checkpoint = shared.opts.sd_model_checkpoint
checkpoint_info = checkpoints_list.get(model_checkpoint, None)
if checkpoint_info is not None:
return checkpoint_info
if len(checkpoints_list) == 0:
print(f"No checkpoints found. When searching for checkpoints, looked at:", file=sys.stderr)
if shared.cmd_opts.ckpt is not None:
print(f" - file {os.path.abspath(shared.cmd_opts.ckpt)}", file=sys.stderr)
print(f" - directory {model_path}", file=sys.stderr)
if shared.cmd_opts.ckpt_dir is not None:
print(f" - directory {os.path.abspath(shared.cmd_opts.ckpt_dir)}", file=sys.stderr)
print(f"Can't run without a checkpoint. Find and place a .ckpt file into any of those locations. The program will exit.", file=sys.stderr)
exit(1)
checkpoint_info = next(iter(checkpoints_list.values()))
if model_checkpoint is not None:
print(f"Checkpoint {model_checkpoint} not found; loading fallback {checkpoint_info.title}", file=sys.stderr)
return checkpoint_info
def get_state_dict_from_checkpoint(pl_sd):
if "state_dict" in pl_sd:
return pl_sd["state_dict"]
return pl_sd
def load_model_weights(model, checkpoint_info):
checkpoint_file = checkpoint_info.filename
sd_model_hash = checkpoint_info.hash
print(f"Loading weights [{sd_model_hash}] from {checkpoint_file}")
pl_sd = torch.load(checkpoint_file, map_location="cpu")
if "global_step" in pl_sd:
print(f"Global Step: {pl_sd['global_step']}")
sd = get_state_dict_from_checkpoint(pl_sd)
model.load_state_dict(sd, strict=False)
if shared.cmd_opts.opt_channelslast:
model.to(memory_format=torch.channels_last)
if not shared.cmd_opts.no_half:
model.half()
devices.dtype = torch.float32 if shared.cmd_opts.no_half else torch.float16
vae_file = os.path.splitext(checkpoint_file)[0] + ".vae.pt"
if os.path.exists(vae_file):
print(f"Loading VAE weights from: {vae_file}")
vae_ckpt = torch.load(vae_file, map_location="cpu")
vae_dict = {k: v for k, v in vae_ckpt["state_dict"].items() if k[0:4] != "loss"}
model.first_stage_model.load_state_dict(vae_dict)
model.sd_model_hash = sd_model_hash
model.sd_model_checkpoint = checkpoint_file
model.sd_checkpoint_info = checkpoint_info
def load_model():
from modules import lowvram, sd_hijack
checkpoint_info = select_checkpoint()
if checkpoint_info.config != shared.cmd_opts.config:
print(f"Loading config from: {checkpoint_info.config}")
sd_config = OmegaConf.load(checkpoint_info.config)
sd_model = instantiate_from_config(sd_config.model)
load_model_weights(sd_model, checkpoint_info)
if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
lowvram.setup_for_low_vram(sd_model, shared.cmd_opts.medvram)
else:
sd_model.to(shared.device)
sd_hijack.model_hijack.hijack(sd_model)
sd_model.eval()
print(f"Model loaded.")
return sd_model
def reload_model_weights(sd_model, info=None):
from modules import lowvram, devices, sd_hijack
checkpoint_info = info or select_checkpoint()
if sd_model.sd_model_checkpoint == checkpoint_info.filename:
return
if sd_model.sd_checkpoint_info.config != checkpoint_info.config:
return load_model()
if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
lowvram.send_everything_to_cpu()
else:
sd_model.to(devices.cpu)
sd_hijack.model_hijack.undo_hijack(sd_model)
load_model_weights(sd_model, checkpoint_info)
sd_hijack.model_hijack.hijack(sd_model)
if not shared.cmd_opts.lowvram and not shared.cmd_opts.medvram:
sd_model.to(devices.device)
print(f"Weights loaded.")
return sd_model
<|code_end|>
modules/shared.py
<|code_start|>import argparse
import datetime
import json
import os
import sys
import gradio as gr
import tqdm
import modules.artists
import modules.interrogate
import modules.memmon
import modules.sd_models
import modules.styles
import modules.devices as devices
from modules import sd_samplers, hypernetwork
from modules.paths import models_path, script_path, sd_path
sd_model_file = os.path.join(script_path, 'model.ckpt')
default_sd_model_file = sd_model_file
parser = argparse.ArgumentParser()
parser.add_argument("--config", type=str, default=os.path.join(sd_path, "configs/stable-diffusion/v1-inference.yaml"), help="path to config which constructs model",)
parser.add_argument("--ckpt", type=str, default=sd_model_file, help="path to checkpoint of stable diffusion model; if specified, this checkpoint will be added to the list of checkpoints and loaded",)
parser.add_argument("--ckpt-dir", type=str, default=None, help="Path to directory with stable diffusion checkpoints")
parser.add_argument("--gfpgan-dir", type=str, help="GFPGAN directory", default=('./src/gfpgan' if os.path.exists('./src/gfpgan') else './GFPGAN'))
parser.add_argument("--gfpgan-model", type=str, help="GFPGAN model file name", default=None)
parser.add_argument("--no-half", action='store_true', help="do not switch the model to 16-bit floats")
parser.add_argument("--no-progressbar-hiding", action='store_true', help="do not hide progressbar in gradio UI (we hide it because it slows down ML if you have hardware acceleration in browser)")
parser.add_argument("--max-batch-count", type=int, default=16, help="maximum batch count value for the UI")
parser.add_argument("--embeddings-dir", type=str, default=os.path.join(script_path, 'embeddings'), help="embeddings directory for textual inversion (default: embeddings)")
parser.add_argument("--allow-code", action='store_true', help="allow custom script execution from webui")
parser.add_argument("--medvram", action='store_true', help="enable stable diffusion model optimizations for sacrificing a little speed for low VRM usage")
parser.add_argument("--lowvram", action='store_true', help="enable stable diffusion model optimizations for sacrificing a lot of speed for very low VRM usage")
parser.add_argument("--always-batch-cond-uncond", action='store_true', help="disables cond/uncond batching that is enabled to save memory with --medvram or --lowvram")
parser.add_argument("--unload-gfpgan", action='store_true', help="does not do anything.")
parser.add_argument("--precision", type=str, help="evaluate at this precision", choices=["full", "autocast"], default="autocast")
parser.add_argument("--share", action='store_true', help="use share=True for gradio and make the UI accessible through their site (doesn't work for me but you might have better luck)")
parser.add_argument("--codeformer-models-path", type=str, help="Path to directory with codeformer model file(s).", default=os.path.join(models_path, 'Codeformer'))
parser.add_argument("--gfpgan-models-path", type=str, help="Path to directory with GFPGAN model file(s).", default=os.path.join(models_path, 'GFPGAN'))
parser.add_argument("--esrgan-models-path", type=str, help="Path to directory with ESRGAN model file(s).", default=os.path.join(models_path, 'ESRGAN'))
parser.add_argument("--bsrgan-models-path", type=str, help="Path to directory with BSRGAN model file(s).", default=os.path.join(models_path, 'BSRGAN'))
parser.add_argument("--realesrgan-models-path", type=str, help="Path to directory with RealESRGAN model file(s).", default=os.path.join(models_path, 'RealESRGAN'))
parser.add_argument("--scunet-models-path", type=str, help="Path to directory with ScuNET model file(s).", default=os.path.join(models_path, 'ScuNET'))
parser.add_argument("--swinir-models-path", type=str, help="Path to directory with SwinIR model file(s).", default=os.path.join(models_path, 'SwinIR'))
parser.add_argument("--ldsr-models-path", type=str, help="Path to directory with LDSR model file(s).", default=os.path.join(models_path, 'LDSR'))
parser.add_argument("--xformers", action='store_true', help="enable xformers for cross attention layers")
parser.add_argument("--force-enable-xformers", action='store_true', help="enable xformers for cross attention layers regardless of whether the checking code thinks you can run it; do not make bug reports if this fails to work")
parser.add_argument("--deepdanbooru", action='store_true', help="enable deepdanbooru interrogator")
parser.add_argument("--opt-split-attention", action='store_true', help="force-enables cross-attention layer optimization. By default, it's on for torch.cuda and off for other torch devices.")
parser.add_argument("--disable-opt-split-attention", action='store_true', help="force-disables cross-attention layer optimization")
parser.add_argument("--opt-split-attention-v1", action='store_true', help="enable older version of split attention optimization that does not consume all the VRAM it can find")
parser.add_argument("--use-cpu", nargs='+',choices=['SD', 'GFPGAN', 'BSRGAN', 'ESRGAN', 'SCUNet', 'CodeFormer'], help="use CPU as torch device for specified modules", default=[])
parser.add_argument("--listen", action='store_true', help="launch gradio with 0.0.0.0 as server name, allowing to respond to network requests")
parser.add_argument("--port", type=int, help="launch gradio with given server port, you need root/admin rights for ports < 1024, defaults to 7860 if available", default=None)
parser.add_argument("--show-negative-prompt", action='store_true', help="does not do anything", default=False)
parser.add_argument("--ui-config-file", type=str, help="filename to use for ui configuration", default=os.path.join(script_path, 'ui-config.json'))
parser.add_argument("--hide-ui-dir-config", action='store_true', help="hide directory configuration from webui", default=False)
parser.add_argument("--ui-settings-file", type=str, help="filename to use for ui settings", default=os.path.join(script_path, 'config.json'))
parser.add_argument("--gradio-debug", action='store_true', help="launch gradio with --debug option")
parser.add_argument("--gradio-auth", type=str, help='set gradio authentication like "username:password"; or comma-delimit multiple like "u1:p1,u2:p2,u3:p3"', default=None)
parser.add_argument("--gradio-img2img-tool", type=str, help='gradio image uploader tool: can be either editor for ctopping, or color-sketch for drawing', choices=["color-sketch", "editor"], default="editor")
parser.add_argument("--opt-channelslast", action='store_true', help="change memory type for stable diffusion to channels last")
parser.add_argument("--styles-file", type=str, help="filename to use for styles", default=os.path.join(script_path, 'styles.csv'))
parser.add_argument("--autolaunch", action='store_true', help="open the webui URL in the system's default browser upon launch", default=False)
parser.add_argument("--use-textbox-seed", action='store_true', help="use textbox for seeds in UI (no up/down, but possible to input long seeds)", default=False)
parser.add_argument("--disable-console-progressbars", action='store_true', help="do not output progressbars to console", default=False)
parser.add_argument("--enable-console-prompts", action='store_true', help="print prompts to console when generating with txt2img and img2img", default=False)
cmd_opts = parser.parse_args()
devices.device, devices.device_gfpgan, devices.device_bsrgan, devices.device_esrgan, devices.device_scunet, devices.device_codeformer = \
(devices.cpu if x in cmd_opts.use_cpu else devices.get_optimal_device() for x in ['SD', 'GFPGAN', 'BSRGAN', 'ESRGAN', 'SCUNet', 'CodeFormer'])
device = devices.device
batch_cond_uncond = cmd_opts.always_batch_cond_uncond or not (cmd_opts.lowvram or cmd_opts.medvram)
parallel_processing_allowed = not cmd_opts.lowvram and not cmd_opts.medvram
xformers_available = False
config_filename = cmd_opts.ui_settings_file
hypernetworks = hypernetwork.list_hypernetworks(os.path.join(models_path, 'hypernetworks'))
loaded_hypernetwork = None
class State:
skipped = False
interrupted = False
job = ""
job_no = 0
job_count = 0
job_timestamp = '0'
sampling_step = 0
sampling_steps = 0
current_latent = None
current_image = None
current_image_sampling_step = 0
textinfo = None
def skip(self):
self.skipped = True
def interrupt(self):
self.interrupted = True
def nextjob(self):
self.job_no += 1
self.sampling_step = 0
self.current_image_sampling_step = 0
def get_job_timestamp(self):
return datetime.datetime.now().strftime("%Y%m%d%H%M%S") # shouldn't this return job_timestamp?
state = State()
artist_db = modules.artists.ArtistsDatabase(os.path.join(script_path, 'artists.csv'))
styles_filename = cmd_opts.styles_file
prompt_styles = modules.styles.StyleDatabase(styles_filename)
interrogator = modules.interrogate.InterrogateModels("interrogate")
face_restorers = []
def realesrgan_models_names():
import modules.realesrgan_model
return [x.name for x in modules.realesrgan_model.get_realesrgan_models(None)]
class OptionInfo:
def __init__(self, default=None, label="", component=None, component_args=None, onchange=None):
self.default = default
self.label = label
self.component = component
self.component_args = component_args
self.onchange = onchange
self.section = None
def options_section(section_identifier, options_dict):
for k, v in options_dict.items():
v.section = section_identifier
return options_dict
hide_dirs = {"visible": not cmd_opts.hide_ui_dir_config}
options_templates = {}
options_templates.update(options_section(('saving-images', "Saving images/grids"), {
"samples_save": OptionInfo(True, "Always save all generated images"),
"samples_format": OptionInfo('png', 'File format for images'),
"samples_filename_pattern": OptionInfo("", "Images filename pattern"),
"grid_save": OptionInfo(True, "Always save all generated image grids"),
"grid_format": OptionInfo('png', 'File format for grids'),
"grid_extended_filename": OptionInfo(False, "Add extended info (seed, prompt) to filename when saving grid"),
"grid_only_if_multiple": OptionInfo(True, "Do not save grids consisting of one picture"),
"n_rows": OptionInfo(-1, "Grid row count; use -1 for autodetect and 0 for it to be same as batch size", gr.Slider, {"minimum": -1, "maximum": 16, "step": 1}),
"enable_pnginfo": OptionInfo(True, "Save text information about generation parameters as chunks to png files"),
"save_txt": OptionInfo(False, "Create a text file next to every image with generation parameters."),
"save_images_before_face_restoration": OptionInfo(False, "Save a copy of image before doing face restoration."),
"jpeg_quality": OptionInfo(80, "Quality for saved jpeg images", gr.Slider, {"minimum": 1, "maximum": 100, "step": 1}),
"export_for_4chan": OptionInfo(True, "If PNG image is larger than 4MB or any dimension is larger than 4000, downscale and save copy as JPG"),
"use_original_name_batch": OptionInfo(False, "Use original name for output filename during batch process in extras tab"),
"save_selected_only": OptionInfo(True, "When using 'Save' button, only save a single selected image"),
}))
options_templates.update(options_section(('saving-paths', "Paths for saving"), {
"outdir_samples": OptionInfo("", "Output directory for images; if empty, defaults to three directories below", component_args=hide_dirs),
"outdir_txt2img_samples": OptionInfo("outputs/txt2img-images", 'Output directory for txt2img images', component_args=hide_dirs),
"outdir_img2img_samples": OptionInfo("outputs/img2img-images", 'Output directory for img2img images', component_args=hide_dirs),
"outdir_extras_samples": OptionInfo("outputs/extras-images", 'Output directory for images from extras tab', component_args=hide_dirs),
"outdir_grids": OptionInfo("", "Output directory for grids; if empty, defaults to two directories below", component_args=hide_dirs),
"outdir_txt2img_grids": OptionInfo("outputs/txt2img-grids", 'Output directory for txt2img grids', component_args=hide_dirs),
"outdir_img2img_grids": OptionInfo("outputs/img2img-grids", 'Output directory for img2img grids', component_args=hide_dirs),
"outdir_save": OptionInfo("log/images", "Directory for saving images using the Save button", component_args=hide_dirs),
}))
options_templates.update(options_section(('saving-to-dirs', "Saving to a directory"), {
"save_to_dirs": OptionInfo(False, "Save images to a subdirectory"),
"grid_save_to_dirs": OptionInfo(False, "Save grids to a subdirectory"),
"use_save_to_dirs_for_ui": OptionInfo(False, "When using \"Save\" button, save images to a subdirectory"),
"directories_filename_pattern": OptionInfo("", "Directory name pattern"),
"directories_max_prompt_words": OptionInfo(8, "Max prompt words for [prompt_words] pattern", gr.Slider, {"minimum": 1, "maximum": 20, "step": 1}),
}))
options_templates.update(options_section(('upscaling', "Upscaling"), {
"ESRGAN_tile": OptionInfo(192, "Tile size for ESRGAN upscalers. 0 = no tiling.", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}),
"ESRGAN_tile_overlap": OptionInfo(8, "Tile overlap, in pixels for ESRGAN upscalers. Low values = visible seam.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}),
"realesrgan_enabled_models": OptionInfo(["R-ESRGAN x4+", "R-ESRGAN x4+ Anime6B"], "Select which Real-ESRGAN models to show in the web UI. (Requires restart)", gr.CheckboxGroup, lambda: {"choices": realesrgan_models_names()}),
"SWIN_tile": OptionInfo(192, "Tile size for all SwinIR.", gr.Slider, {"minimum": 16, "maximum": 512, "step": 16}),
"SWIN_tile_overlap": OptionInfo(8, "Tile overlap, in pixels for SwinIR. Low values = visible seam.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}),
"ldsr_steps": OptionInfo(100, "LDSR processing steps. Lower = faster", gr.Slider, {"minimum": 1, "maximum": 200, "step": 1}),
"upscaler_for_img2img": OptionInfo(None, "Upscaler for img2img", gr.Dropdown, lambda: {"choices": [x.name for x in sd_upscalers]}),
}))
options_templates.update(options_section(('face-restoration', "Face restoration"), {
"face_restoration_model": OptionInfo(None, "Face restoration model", gr.Radio, lambda: {"choices": [x.name() for x in face_restorers]}),
"code_former_weight": OptionInfo(0.5, "CodeFormer weight parameter; 0 = maximum effect; 1 = minimum effect", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}),
"face_restoration_unload": OptionInfo(False, "Move face restoration model from VRAM into RAM after processing"),
}))
options_templates.update(options_section(('system', "System"), {
"memmon_poll_rate": OptionInfo(8, "VRAM usage polls per second during generation. Set to 0 to disable.", gr.Slider, {"minimum": 0, "maximum": 40, "step": 1}),
"samples_log_stdout": OptionInfo(False, "Always print all generation info to standard output"),
"multiple_tqdm": OptionInfo(True, "Add a second progress bar to the console that shows progress for an entire job."),
}))
options_templates.update(options_section(('sd', "Stable Diffusion"), {
"sd_model_checkpoint": OptionInfo(None, "Stable Diffusion checkpoint", gr.Dropdown, lambda: {"choices": modules.sd_models.checkpoint_tiles()}),
"sd_hypernetwork": OptionInfo("None", "Stable Diffusion finetune hypernetwork", gr.Dropdown, lambda: {"choices": ["None"] + [x for x in hypernetworks.keys()]}),
"img2img_color_correction": OptionInfo(False, "Apply color correction to img2img results to match original colors."),
"save_images_before_color_correction": OptionInfo(False, "Save a copy of image before applying color correction to img2img results"),
"img2img_fix_steps": OptionInfo(False, "With img2img, do exactly the amount of steps the slider specifies (normally you'd do less with less denoising)."),
"enable_quantization": OptionInfo(False, "Enable quantization in K samplers for sharper and cleaner results. This may change existing seeds. Requires restart to apply."),
"enable_emphasis": OptionInfo(True, "Emphasis: use (text) to make model pay more attention to text and [text] to make it pay less attention"),
"use_old_emphasis_implementation": OptionInfo(False, "Use old emphasis implementation. Can be useful to reproduce old seeds."),
"enable_batch_seeds": OptionInfo(True, "Make K-diffusion samplers produce same images in a batch as when making a single image"),
"filter_nsfw": OptionInfo(False, "Filter NSFW content"),
'CLIP_ignore_last_layers': OptionInfo(0, "Ignore last layers of CLIP model", gr.Slider, {"minimum": 0, "maximum": 5, "step": 1}),
"random_artist_categories": OptionInfo([], "Allowed categories for random artists selection when using the Roll button", gr.CheckboxGroup, {"choices": artist_db.categories()}),
}))
options_templates.update(options_section(('interrogate', "Interrogate Options"), {
"interrogate_keep_models_in_memory": OptionInfo(False, "Interrogate: keep models in VRAM"),
"interrogate_use_builtin_artists": OptionInfo(True, "Interrogate: use artists from artists.csv"),
"interrogate_clip_num_beams": OptionInfo(1, "Interrogate: num_beams for BLIP", gr.Slider, {"minimum": 1, "maximum": 16, "step": 1}),
"interrogate_clip_min_length": OptionInfo(24, "Interrogate: minimum description length (excluding artists, etc..)", gr.Slider, {"minimum": 1, "maximum": 128, "step": 1}),
"interrogate_clip_max_length": OptionInfo(48, "Interrogate: maximum description length", gr.Slider, {"minimum": 1, "maximum": 256, "step": 1}),
"interrogate_clip_dict_limit": OptionInfo(1500, "Interrogate: maximum number of lines in text file (0 = No limit)"),
}))
options_templates.update(options_section(('ui', "User interface"), {
"show_progressbar": OptionInfo(True, "Show progressbar"),
"show_progress_every_n_steps": OptionInfo(0, "Show image creation progress every N sampling steps. Set 0 to disable.", gr.Slider, {"minimum": 0, "maximum": 32, "step": 1}),
"return_grid": OptionInfo(True, "Show grid in results for web"),
"do_not_show_images": OptionInfo(False, "Do not show any images in results for web"),
"add_model_hash_to_info": OptionInfo(True, "Add model hash to generation information"),
"font": OptionInfo("", "Font for image grids that have text"),
"js_modal_lightbox": OptionInfo(True, "Enable full page image viewer"),
"js_modal_lightbox_initially_zoomed": OptionInfo(True, "Show images zoomed in by default in full page image viewer"),
"show_progress_in_title": OptionInfo(True, "Show generation progress in window title."),
}))
options_templates.update(options_section(('sampler-params', "Sampler parameters"), {
"hide_samplers": OptionInfo([], "Hide samplers in user interface (requires restart)", gr.CheckboxGroup, lambda: {"choices": [x.name for x in sd_samplers.all_samplers]}),
"eta_ddim": OptionInfo(0.0, "eta (noise multiplier) for DDIM", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
"eta_ancestral": OptionInfo(1.0, "eta (noise multiplier) for ancestral samplers", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
"ddim_discretize": OptionInfo('uniform', "img2img DDIM discretize", gr.Radio, {"choices": ['uniform', 'quad']}),
's_churn': OptionInfo(0.0, "sigma churn", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
's_tmin': OptionInfo(0.0, "sigma tmin", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
's_noise': OptionInfo(1.0, "sigma noise", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
}))
class Options:
data = None
data_labels = options_templates
typemap = {int: float}
def __init__(self):
self.data = {k: v.default for k, v in self.data_labels.items()}
def __setattr__(self, key, value):
if self.data is not None:
if key in self.data:
self.data[key] = value
return super(Options, self).__setattr__(key, value)
def __getattr__(self, item):
if self.data is not None:
if item in self.data:
return self.data[item]
if item in self.data_labels:
return self.data_labels[item].default
return super(Options, self).__getattribute__(item)
def save(self, filename):
with open(filename, "w", encoding="utf8") as file:
json.dump(self.data, file)
def same_type(self, x, y):
if x is None or y is None:
return True
type_x = self.typemap.get(type(x), type(x))
type_y = self.typemap.get(type(y), type(y))
return type_x == type_y
def load(self, filename):
with open(filename, "r", encoding="utf8") as file:
self.data = json.load(file)
bad_settings = 0
for k, v in self.data.items():
info = self.data_labels.get(k, None)
if info is not None and not self.same_type(info.default, v):
print(f"Warning: bad setting value: {k}: {v} ({type(v).__name__}; expected {type(info.default).__name__})", file=sys.stderr)
bad_settings += 1
if bad_settings > 0:
print(f"The program is likely to not work with bad settings.\nSettings file: {filename}\nEither fix the file, or delete it and restart.", file=sys.stderr)
def onchange(self, key, func):
item = self.data_labels.get(key)
item.onchange = func
def dumpjson(self):
d = {k: self.data.get(k, self.data_labels.get(k).default) for k in self.data_labels.keys()}
return json.dumps(d)
opts = Options()
if os.path.exists(config_filename):
opts.load(config_filename)
sd_upscalers = []
sd_model = None
progress_print_out = sys.stdout
class TotalTQDM:
def __init__(self):
self._tqdm = None
def reset(self):
self._tqdm = tqdm.tqdm(
desc="Total progress",
total=state.job_count * state.sampling_steps,
position=1,
file=progress_print_out
)
def update(self):
if not opts.multiple_tqdm or cmd_opts.disable_console_progressbars:
return
if self._tqdm is None:
self.reset()
self._tqdm.update()
def updateTotal(self, new_total):
if not opts.multiple_tqdm or cmd_opts.disable_console_progressbars:
return
if self._tqdm is None:
self.reset()
self._tqdm.total=new_total
def clear(self):
if self._tqdm is not None:
self._tqdm.close()
self._tqdm = None
total_tqdm = TotalTQDM()
mem_mon = modules.memmon.MemUsageMonitor("MemMon", device, opts)
mem_mon.start()
<|code_end|>
|
modules/processing.py
<|code_start|>import json
import math
import os
import sys
import torch
import numpy as np
from PIL import Image, ImageFilter, ImageOps
import random
import cv2
from skimage import exposure
import modules.sd_hijack
from modules import devices, prompt_parser, masking, sd_samplers, lowvram
from modules.sd_hijack import model_hijack
from modules.shared import opts, cmd_opts, state
import modules.shared as shared
import modules.face_restoration
import modules.images as images
import modules.styles
import logging
# some of those options should not be changed at all because they would break the model, so I removed them from options.
opt_C = 4
opt_f = 8
def setup_color_correction(image):
logging.info("Calibrating color correction.")
correction_target = cv2.cvtColor(np.asarray(image.copy()), cv2.COLOR_RGB2LAB)
return correction_target
def apply_color_correction(correction, image):
logging.info("Applying color correction.")
image = Image.fromarray(cv2.cvtColor(exposure.match_histograms(
cv2.cvtColor(
np.asarray(image),
cv2.COLOR_RGB2LAB
),
correction,
channel_axis=2
), cv2.COLOR_LAB2RGB).astype("uint8"))
return image
def get_correct_sampler(p):
if isinstance(p, modules.processing.StableDiffusionProcessingTxt2Img):
return sd_samplers.samplers
elif isinstance(p, modules.processing.StableDiffusionProcessingImg2Img):
return sd_samplers.samplers_for_img2img
class StableDiffusionProcessing:
def __init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt="", styles=None, seed=-1, subseed=-1, subseed_strength=0, seed_resize_from_h=-1, seed_resize_from_w=-1, seed_enable_extras=True, sampler_index=0, batch_size=1, n_iter=1, steps=50, cfg_scale=7.0, width=512, height=512, restore_faces=False, tiling=False, do_not_save_samples=False, do_not_save_grid=False, extra_generation_params=None, overlay_images=None, negative_prompt=None, eta=None):
self.sd_model = sd_model
self.outpath_samples: str = outpath_samples
self.outpath_grids: str = outpath_grids
self.prompt: str = prompt
self.prompt_for_display: str = None
self.negative_prompt: str = (negative_prompt or "")
self.styles: list = styles or []
self.seed: int = seed
self.subseed: int = subseed
self.subseed_strength: float = subseed_strength
self.seed_resize_from_h: int = seed_resize_from_h
self.seed_resize_from_w: int = seed_resize_from_w
self.sampler_index: int = sampler_index
self.batch_size: int = batch_size
self.n_iter: int = n_iter
self.steps: int = steps
self.cfg_scale: float = cfg_scale
self.width: int = width
self.height: int = height
self.restore_faces: bool = restore_faces
self.tiling: bool = tiling
self.do_not_save_samples: bool = do_not_save_samples
self.do_not_save_grid: bool = do_not_save_grid
self.extra_generation_params: dict = extra_generation_params or {}
self.overlay_images = overlay_images
self.eta = eta
self.paste_to = None
self.color_corrections = None
self.denoising_strength: float = 0
self.sampler_noise_scheduler_override = None
self.ddim_discretize = opts.ddim_discretize
self.s_churn = opts.s_churn
self.s_tmin = opts.s_tmin
self.s_tmax = float('inf') # not representable as a standard ui option
self.s_noise = opts.s_noise
if not seed_enable_extras:
self.subseed = -1
self.subseed_strength = 0
self.seed_resize_from_h = 0
self.seed_resize_from_w = 0
def init(self, all_prompts, all_seeds, all_subseeds):
pass
def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength):
raise NotImplementedError()
class Processed:
def __init__(self, p: StableDiffusionProcessing, images_list, seed=-1, info="", subseed=None, all_prompts=None, all_seeds=None, all_subseeds=None, index_of_first_image=0, infotexts=None):
self.images = images_list
self.prompt = p.prompt
self.negative_prompt = p.negative_prompt
self.seed = seed
self.subseed = subseed
self.subseed_strength = p.subseed_strength
self.info = info
self.width = p.width
self.height = p.height
self.sampler_index = p.sampler_index
self.sampler = sd_samplers.samplers[p.sampler_index].name
self.cfg_scale = p.cfg_scale
self.steps = p.steps
self.batch_size = p.batch_size
self.restore_faces = p.restore_faces
self.face_restoration_model = opts.face_restoration_model if p.restore_faces else None
self.sd_model_hash = shared.sd_model.sd_model_hash
self.seed_resize_from_w = p.seed_resize_from_w
self.seed_resize_from_h = p.seed_resize_from_h
self.denoising_strength = getattr(p, 'denoising_strength', None)
self.extra_generation_params = p.extra_generation_params
self.index_of_first_image = index_of_first_image
self.styles = p.styles
self.job_timestamp = state.job_timestamp
self.clip_skip = opts.CLIP_ignore_last_layers
self.eta = p.eta
self.ddim_discretize = p.ddim_discretize
self.s_churn = p.s_churn
self.s_tmin = p.s_tmin
self.s_tmax = p.s_tmax
self.s_noise = p.s_noise
self.sampler_noise_scheduler_override = p.sampler_noise_scheduler_override
self.prompt = self.prompt if type(self.prompt) != list else self.prompt[0]
self.negative_prompt = self.negative_prompt if type(self.negative_prompt) != list else self.negative_prompt[0]
self.seed = int(self.seed if type(self.seed) != list else self.seed[0])
self.subseed = int(self.subseed if type(self.subseed) != list else self.subseed[0]) if self.subseed is not None else -1
self.all_prompts = all_prompts or [self.prompt]
self.all_seeds = all_seeds or [self.seed]
self.all_subseeds = all_subseeds or [self.subseed]
self.infotexts = infotexts or [info]
def js(self):
obj = {
"prompt": self.prompt,
"all_prompts": self.all_prompts,
"negative_prompt": self.negative_prompt,
"seed": self.seed,
"all_seeds": self.all_seeds,
"subseed": self.subseed,
"all_subseeds": self.all_subseeds,
"subseed_strength": self.subseed_strength,
"width": self.width,
"height": self.height,
"sampler_index": self.sampler_index,
"sampler": self.sampler,
"cfg_scale": self.cfg_scale,
"steps": self.steps,
"batch_size": self.batch_size,
"restore_faces": self.restore_faces,
"face_restoration_model": self.face_restoration_model,
"sd_model_hash": self.sd_model_hash,
"seed_resize_from_w": self.seed_resize_from_w,
"seed_resize_from_h": self.seed_resize_from_h,
"denoising_strength": self.denoising_strength,
"extra_generation_params": self.extra_generation_params,
"index_of_first_image": self.index_of_first_image,
"infotexts": self.infotexts,
"styles": self.styles,
"job_timestamp": self.job_timestamp,
"clip_skip": self.clip_skip,
}
return json.dumps(obj)
def infotext(self, p: StableDiffusionProcessing, index):
return create_infotext(p, self.all_prompts, self.all_seeds, self.all_subseeds, comments=[], position_in_batch=index % self.batch_size, iteration=index // self.batch_size)
# from https://discuss.pytorch.org/t/help-regarding-slerp-function-for-generative-model-sampling/32475/3
def slerp(val, low, high):
low_norm = low/torch.norm(low, dim=1, keepdim=True)
high_norm = high/torch.norm(high, dim=1, keepdim=True)
dot = (low_norm*high_norm).sum(1)
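    # when the two directions are nearly parallel, sin(omega) is close to zero, so skip the spherical formula below and fall back to a simple linear blend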
if dot.mean() > 0.9995:
return low * val + high * (1 - val)
omega = torch.acos(dot)
so = torch.sin(omega)
res = (torch.sin((1.0-val)*omega)/so).unsqueeze(1)*low + (torch.sin(val*omega)/so).unsqueeze(1) * high
return res
def create_random_tensors(shape, seeds, subseeds=None, subseed_strength=0.0, seed_resize_from_h=0, seed_resize_from_w=0, p=None):
xs = []
# if we have multiple seeds, this means we are working with batch size>1; this then
# enables the generation of additional tensors with noise that the sampler will use during its processing.
# Using those pre-generated tensors instead of simple torch.randn allows a batch with seeds [100, 101] to
# produce the same images as with two batches [100], [101].
if p is not None and p.sampler is not None and len(seeds) > 1 and opts.enable_batch_seeds:
sampler_noises = [[] for _ in range(p.sampler.number_of_needed_noises(p))]
else:
sampler_noises = None
for i, seed in enumerate(seeds):
noise_shape = shape if seed_resize_from_h <= 0 or seed_resize_from_w <= 0 else (shape[0], seed_resize_from_h//8, seed_resize_from_w//8)
subnoise = None
if subseeds is not None:
subseed = 0 if i >= len(subseeds) else subseeds[i]
subnoise = devices.randn(subseed, noise_shape)
# randn results depend on device; gpu and cpu get different results for same seed;
# the way I see it, it's better to do this on CPU, so that everyone gets same result;
# but the original script had it like this, so I do not dare change it for now because
# it will break everyone's seeds.
noise = devices.randn(seed, noise_shape)
if subnoise is not None:
noise = slerp(subseed_strength, noise, subnoise)
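        # when seed-resize is in use, the noise above was generated at the original resolution; paste its overlapping region into the center of a full-size noise tensor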
if noise_shape != shape:
x = devices.randn(seed, shape)
dx = (shape[2] - noise_shape[2]) // 2
dy = (shape[1] - noise_shape[1]) // 2
w = noise_shape[2] if dx >= 0 else noise_shape[2] + 2 * dx
h = noise_shape[1] if dy >= 0 else noise_shape[1] + 2 * dy
tx = 0 if dx < 0 else dx
ty = 0 if dy < 0 else dy
dx = max(-dx, 0)
dy = max(-dy, 0)
x[:, ty:ty+h, tx:tx+w] = noise[:, dy:dy+h, dx:dx+w]
noise = x
if sampler_noises is not None:
cnt = p.sampler.number_of_needed_noises(p)
for j in range(cnt):
sampler_noises[j].append(devices.randn_without_seed(tuple(noise_shape)))
xs.append(noise)
if sampler_noises is not None:
p.sampler.sampler_noises = [torch.stack(n).to(shared.device) for n in sampler_noises]
x = torch.stack(xs).to(shared.device)
return x
def get_fixed_seed(seed):
if seed is None or seed == '' or seed == -1:
return int(random.randrange(4294967294))
return seed
def fix_seed(p):
p.seed = get_fixed_seed(p.seed)
p.subseed = get_fixed_seed(p.subseed)
def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments, iteration=0, position_in_batch=0):
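    # position_in_batch and iteration together index into the flattened all_prompts/all_seeds lists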
index = position_in_batch + iteration * p.batch_size
clip_skip = getattr(p, 'clip_skip', opts.CLIP_ignore_last_layers)
generation_params = {
"Steps": p.steps,
"Sampler": get_correct_sampler(p)[p.sampler_index].name,
"CFG scale": p.cfg_scale,
"Seed": all_seeds[index],
"Face restoration": (opts.face_restoration_model if p.restore_faces else None),
"Size": f"{p.width}x{p.height}",
"Model hash": getattr(p, 'sd_model_hash', None if not opts.add_model_hash_to_info or not shared.sd_model.sd_model_hash else shared.sd_model.sd_model_hash),
"Model": (None if not opts.add_extended_model_details_to_info or not shared.sd_model.sd_checkpoint_info.model_name else shared.sd_model.sd_checkpoint_info.model_name.replace(',', '').replace(':', '')),
"Model VAE": (None if not opts.add_extended_model_details_to_info or not shared.sd_model.sd_model_vae_name else shared.sd_model.sd_model_vae_name.replace(',', '').replace(':', '')),
"Model hypernetwork": (None if not opts.add_extended_model_details_to_info or not opts.sd_hypernetwork else opts.sd_hypernetwork.replace(',', '').replace(':', '')),
"Batch size": (None if p.batch_size < 2 else p.batch_size),
"Batch pos": (None if p.batch_size < 2 else position_in_batch),
"Variation seed": (None if p.subseed_strength == 0 else all_subseeds[index]),
"Variation seed strength": (None if p.subseed_strength == 0 else p.subseed_strength),
"Seed resize from": (None if p.seed_resize_from_w == 0 or p.seed_resize_from_h == 0 else f"{p.seed_resize_from_w}x{p.seed_resize_from_h}"),
"Denoising strength": getattr(p, 'denoising_strength', None),
"Eta": (None if p.sampler is None or p.sampler.eta == p.sampler.default_eta else p.sampler.eta),
"Clip skip": None if clip_skip==0 else clip_skip,
}
generation_params.update(p.extra_generation_params)
generation_params_text = ", ".join([k if k == v else f'{k}: {v}' for k, v in generation_params.items() if v is not None])
negative_prompt_text = "\nNegative prompt: " + p.negative_prompt if p.negative_prompt else ""
return f"{all_prompts[index]}{negative_prompt_text}\n{generation_params_text}".strip()
def process_images(p: StableDiffusionProcessing) -> Processed:
"""this is the main loop that both txt2img and img2img use; it calls func_init once inside all the scopes and func_sample once per batch"""
if type(p.prompt) == list:
assert(len(p.prompt) > 0)
else:
assert p.prompt is not None
devices.torch_gc()
seed = get_fixed_seed(p.seed)
subseed = get_fixed_seed(p.subseed)
if p.outpath_samples is not None:
os.makedirs(p.outpath_samples, exist_ok=True)
if p.outpath_grids is not None:
os.makedirs(p.outpath_grids, exist_ok=True)
modules.sd_hijack.model_hijack.apply_circular(p.tiling)
modules.sd_hijack.model_hijack.clear_comments()
comments = {}
shared.prompt_styles.apply_styles(p)
if type(p.prompt) == list:
all_prompts = p.prompt
else:
all_prompts = p.batch_size * p.n_iter * [p.prompt]
if type(seed) == list:
all_seeds = seed
else:
all_seeds = [int(seed) + (x if p.subseed_strength == 0 else 0) for x in range(len(all_prompts))]
if type(subseed) == list:
all_subseeds = subseed
else:
all_subseeds = [int(subseed) + x for x in range(len(all_prompts))]
def infotext(iteration=0, position_in_batch=0):
return create_infotext(p, all_prompts, all_seeds, all_subseeds, comments, iteration, position_in_batch)
if os.path.exists(cmd_opts.embeddings_dir):
model_hijack.embedding_db.load_textual_inversion_embeddings()
infotexts = []
output_images = []
with torch.no_grad(), p.sd_model.ema_scope():
with devices.autocast():
p.init(all_prompts, all_seeds, all_subseeds)
if state.job_count == -1:
state.job_count = p.n_iter
for n in range(p.n_iter):
if state.skipped:
state.skipped = False
if state.interrupted:
break
prompts = all_prompts[n * p.batch_size:(n + 1) * p.batch_size]
seeds = all_seeds[n * p.batch_size:(n + 1) * p.batch_size]
subseeds = all_subseeds[n * p.batch_size:(n + 1) * p.batch_size]
if (len(prompts) == 0):
break
#uc = p.sd_model.get_learned_conditioning(len(prompts) * [p.negative_prompt])
#c = p.sd_model.get_learned_conditioning(prompts)
with devices.autocast():
uc = prompt_parser.get_learned_conditioning(shared.sd_model, len(prompts) * [p.negative_prompt], p.steps)
c = prompt_parser.get_multicond_learned_conditioning(shared.sd_model, prompts, p.steps)
if len(model_hijack.comments) > 0:
for comment in model_hijack.comments:
comments[comment] = 1
if p.n_iter > 1:
shared.state.job = f"Batch {n+1} out of {p.n_iter}"
with devices.autocast():
samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength)
if state.interrupted or state.skipped:
# if we are interrupted, sample returns just noise
# use the image collected previously in sampler loop
samples_ddim = shared.state.current_latent
samples_ddim = samples_ddim.to(devices.dtype)
x_samples_ddim = p.sd_model.decode_first_stage(samples_ddim)
x_samples_ddim = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)
del samples_ddim
if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
lowvram.send_everything_to_cpu()
devices.torch_gc()
if opts.filter_nsfw:
import modules.safety as safety
x_samples_ddim = modules.safety.censor_batch(x_samples_ddim)
for i, x_sample in enumerate(x_samples_ddim):
x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2)
x_sample = x_sample.astype(np.uint8)
if p.restore_faces:
if opts.save and not p.do_not_save_samples and opts.save_images_before_face_restoration:
images.save_image(Image.fromarray(x_sample), p.outpath_samples, "", seeds[i], prompts[i], opts.samples_format, info=infotext(n, i), p=p, suffix="-before-face-restoration")
devices.torch_gc()
x_sample = modules.face_restoration.restore_faces(x_sample)
devices.torch_gc()
image = Image.fromarray(x_sample)
if p.color_corrections is not None and i < len(p.color_corrections):
if opts.save and not p.do_not_save_samples and opts.save_images_before_color_correction:
images.save_image(image, p.outpath_samples, "", seeds[i], prompts[i], opts.samples_format, info=infotext(n, i), p=p, suffix="-before-color-correction")
image = apply_color_correction(p.color_corrections[i], image)
if p.overlay_images is not None and i < len(p.overlay_images):
overlay = p.overlay_images[i]
if p.paste_to is not None:
x, y, w, h = p.paste_to
base_image = Image.new('RGBA', (overlay.width, overlay.height))
image = images.resize_image(1, image, w, h)
base_image.paste(image, (x, y))
image = base_image
image = image.convert('RGBA')
image.alpha_composite(overlay)
image = image.convert('RGB')
if opts.samples_save and not p.do_not_save_samples:
images.save_image(image, p.outpath_samples, "", seeds[i], prompts[i], opts.samples_format, info=infotext(n, i), p=p)
text = infotext(n, i)
infotexts.append(text)
image.info["parameters"] = text
output_images.append(image)
del x_samples_ddim
devices.torch_gc()
state.nextjob()
p.color_corrections = None
index_of_first_image = 0
unwanted_grid_because_of_img_count = len(output_images) < 2 and opts.grid_only_if_multiple
if (opts.return_grid or opts.grid_save) and not p.do_not_save_grid and not unwanted_grid_because_of_img_count:
grid = images.image_grid(output_images, p.batch_size)
if opts.return_grid:
text = infotext()
infotexts.insert(0, text)
grid.info["parameters"] = text
output_images.insert(0, grid)
index_of_first_image = 1
if opts.grid_save:
images.save_image(grid, p.outpath_grids, "grid", all_seeds[0], all_prompts[0], opts.grid_format, info=infotext(), short_filename=not opts.grid_extended_filename, p=p, grid=True)
devices.torch_gc()
return Processed(p, output_images, all_seeds[0], infotext() + "".join(["\n\n" + x for x in comments]), subseed=all_subseeds[0], all_prompts=all_prompts, all_seeds=all_seeds, all_subseeds=all_subseeds, index_of_first_image=index_of_first_image, infotexts=infotexts)
class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
sampler = None
firstphase_width = 0
firstphase_height = 0
firstphase_width_truncated = 0
firstphase_height_truncated = 0
def __init__(self, enable_hr=False, scale_latent=True, denoising_strength=0.75, **kwargs):
super().__init__(**kwargs)
self.enable_hr = enable_hr
self.scale_latent = scale_latent
self.denoising_strength = denoising_strength
def init(self, all_prompts, all_seeds, all_subseeds):
if self.enable_hr:
if state.job_count == -1:
state.job_count = self.n_iter * 2
else:
state.job_count = state.job_count * 2
desired_pixel_count = 512 * 512
actual_pixel_count = self.width * self.height
scale = math.sqrt(desired_pixel_count / actual_pixel_count)
self.firstphase_width = math.ceil(scale * self.width / 64) * 64
self.firstphase_height = math.ceil(scale * self.height / 64) * 64
self.firstphase_width_truncated = int(scale * self.width)
self.firstphase_height_truncated = int(scale * self.height)
def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength):
self.sampler = sd_samplers.create_sampler_with_index(sd_samplers.samplers, self.sampler_index, self.sd_model)
if not self.enable_hr:
x = create_random_tensors([opt_C, self.height // opt_f, self.width // opt_f], seeds=seeds, subseeds=subseeds, subseed_strength=self.subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self)
samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning)
return samples
x = create_random_tensors([opt_C, self.firstphase_height // opt_f, self.firstphase_width // opt_f], seeds=seeds, subseeds=subseeds, subseed_strength=self.subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self)
samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning)
truncate_x = (self.firstphase_width - self.firstphase_width_truncated) // opt_f
truncate_y = (self.firstphase_height - self.firstphase_height_truncated) // opt_f
samples = samples[:, :, truncate_y//2:samples.shape[2]-truncate_y//2, truncate_x//2:samples.shape[3]-truncate_x//2]
if self.scale_latent:
samples = torch.nn.functional.interpolate(samples, size=(self.height // opt_f, self.width // opt_f), mode="bilinear")
else:
decoded_samples = self.sd_model.decode_first_stage(samples)
if opts.upscaler_for_img2img is None or opts.upscaler_for_img2img == "None":
decoded_samples = torch.nn.functional.interpolate(decoded_samples, size=(self.height, self.width), mode="bilinear")
else:
lowres_samples = torch.clamp((decoded_samples + 1.0) / 2.0, min=0.0, max=1.0)
batch_images = []
for i, x_sample in enumerate(lowres_samples):
x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2)
x_sample = x_sample.astype(np.uint8)
image = Image.fromarray(x_sample)
image = images.resize_image(0, image, self.width, self.height)
image = np.array(image).astype(np.float32) / 255.0
image = np.moveaxis(image, 2, 0)
batch_images.append(image)
decoded_samples = torch.from_numpy(np.array(batch_images))
decoded_samples = decoded_samples.to(shared.device)
decoded_samples = 2. * decoded_samples - 1.
samples = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(decoded_samples))
shared.state.nextjob()
self.sampler = sd_samplers.create_sampler_with_index(sd_samplers.samplers, self.sampler_index, self.sd_model)
noise = create_random_tensors(samples.shape[1:], seeds=seeds, subseeds=subseeds, subseed_strength=subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self)
# GC now before running the next img2img to prevent running out of memory
x = None
devices.torch_gc()
samples = self.sampler.sample_img2img(self, samples, noise, conditioning, unconditional_conditioning, steps=self.steps)
return samples
class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
sampler = None
def __init__(self, init_images=None, resize_mode=0, denoising_strength=0.75, mask=None, mask_blur=4, inpainting_fill=0, inpaint_full_res=True, inpaint_full_res_padding=0, inpainting_mask_invert=0, **kwargs):
super().__init__(**kwargs)
self.init_images = init_images
self.resize_mode: int = resize_mode
self.denoising_strength: float = denoising_strength
self.init_latent = None
self.image_mask = mask
#self.image_unblurred_mask = None
self.latent_mask = None
self.mask_for_overlay = None
self.mask_blur = mask_blur
self.inpainting_fill = inpainting_fill
self.inpaint_full_res = inpaint_full_res
self.inpaint_full_res_padding = inpaint_full_res_padding
self.inpainting_mask_invert = inpainting_mask_invert
self.mask = None
self.nmask = None
def init(self, all_prompts, all_seeds, all_subseeds):
self.sampler = sd_samplers.create_sampler_with_index(sd_samplers.samplers_for_img2img, self.sampler_index, self.sd_model)
crop_region = None
if self.image_mask is not None:
self.image_mask = self.image_mask.convert('L')
if self.inpainting_mask_invert:
self.image_mask = ImageOps.invert(self.image_mask)
#self.image_unblurred_mask = self.image_mask
if self.mask_blur > 0:
self.image_mask = self.image_mask.filter(ImageFilter.GaussianBlur(self.mask_blur))
if self.inpaint_full_res:
self.mask_for_overlay = self.image_mask
mask = self.image_mask.convert('L')
crop_region = masking.get_crop_region(np.array(mask), self.inpaint_full_res_padding)
crop_region = masking.expand_crop_region(crop_region, self.width, self.height, mask.width, mask.height)
x1, y1, x2, y2 = crop_region
mask = mask.crop(crop_region)
self.image_mask = images.resize_image(2, mask, self.width, self.height)
self.paste_to = (x1, y1, x2-x1, y2-y1)
else:
self.image_mask = images.resize_image(self.resize_mode, self.image_mask, self.width, self.height)
np_mask = np.array(self.image_mask)
np_mask = np.clip((np_mask.astype(np.float32)) * 2, 0, 255).astype(np.uint8)
self.mask_for_overlay = Image.fromarray(np_mask)
self.overlay_images = []
latent_mask = self.latent_mask if self.latent_mask is not None else self.image_mask
add_color_corrections = opts.img2img_color_correction and self.color_corrections is None
if add_color_corrections:
self.color_corrections = []
imgs = []
for img in self.init_images:
image = img.convert("RGB")
if crop_region is None:
image = images.resize_image(self.resize_mode, image, self.width, self.height)
if self.image_mask is not None:
image_masked = Image.new('RGBa', (image.width, image.height))
image_masked.paste(image.convert("RGBA").convert("RGBa"), mask=ImageOps.invert(self.mask_for_overlay.convert('L')))
self.overlay_images.append(image_masked.convert('RGBA'))
if crop_region is not None:
image = image.crop(crop_region)
image = images.resize_image(2, image, self.width, self.height)
if self.image_mask is not None:
if self.inpainting_fill != 1:
image = masking.fill(image, latent_mask)
if add_color_corrections:
self.color_corrections.append(setup_color_correction(image))
image = np.array(image).astype(np.float32) / 255.0
image = np.moveaxis(image, 2, 0)
imgs.append(image)
if len(imgs) == 1:
batch_images = np.expand_dims(imgs[0], axis=0).repeat(self.batch_size, axis=0)
if self.overlay_images is not None:
self.overlay_images = self.overlay_images * self.batch_size
elif len(imgs) <= self.batch_size:
self.batch_size = len(imgs)
batch_images = np.array(imgs)
else:
raise RuntimeError(f"bad number of images passed: {len(imgs)}; expecting {self.batch_size} or less")
image = torch.from_numpy(batch_images)
image = 2. * image - 1.
image = image.to(shared.device)
self.init_latent = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(image))
if self.image_mask is not None:
init_mask = latent_mask
latmask = init_mask.convert('RGB').resize((self.init_latent.shape[3], self.init_latent.shape[2]))
latmask = np.moveaxis(np.array(latmask, dtype=np.float32), 2, 0) / 255
latmask = latmask[0]
latmask = np.around(latmask)
latmask = np.tile(latmask[None], (4, 1, 1))
self.mask = torch.asarray(1.0 - latmask).to(shared.device).type(self.sd_model.dtype)
self.nmask = torch.asarray(latmask).to(shared.device).type(self.sd_model.dtype)
# this needs to be fixed to be done in sample() using actual seeds for batches
if self.inpainting_fill == 2:
self.init_latent = self.init_latent * self.mask + create_random_tensors(self.init_latent.shape[1:], all_seeds[0:self.init_latent.shape[0]]) * self.nmask
elif self.inpainting_fill == 3:
self.init_latent = self.init_latent * self.mask
def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength):
x = create_random_tensors([opt_C, self.height // opt_f, self.width // opt_f], seeds=seeds, subseeds=subseeds, subseed_strength=self.subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self)
samples = self.sampler.sample_img2img(self, self.init_latent, x, conditioning, unconditional_conditioning)
if self.mask is not None:
samples = samples * self.nmask + self.init_latent * self.mask
del x
devices.torch_gc()
return samples
<|code_end|>
modules/sd_models.py
<|code_start|>import glob
import os.path
import sys
from collections import namedtuple
import torch
from omegaconf import OmegaConf
from pathlib import Path
from ldm.util import instantiate_from_config
from modules import shared, modelloader, devices
from modules.paths import models_path
model_dir = "Stable-diffusion"
model_path = os.path.abspath(os.path.join(models_path, model_dir))
CheckpointInfo = namedtuple("CheckpointInfo", ['filename', 'title', 'hash', 'model_name', 'config'])
checkpoints_list = {}
try:
# this silences the annoying "Some weights of the model checkpoint were not used when initializing..." message at start.
from transformers import logging
logging.set_verbosity_error()
except Exception:
pass
def setup_model():
if not os.path.exists(model_path):
os.makedirs(model_path)
list_models()
def checkpoint_tiles():
return sorted([x.title for x in checkpoints_list.values()])
def list_models():
checkpoints_list.clear()
model_list = modelloader.load_models(model_path=model_path, command_path=shared.cmd_opts.ckpt_dir, ext_filter=[".ckpt"])
def modeltitle(path, shorthash):
abspath = os.path.abspath(path)
if shared.cmd_opts.ckpt_dir is not None and abspath.startswith(shared.cmd_opts.ckpt_dir):
name = abspath.replace(shared.cmd_opts.ckpt_dir, '')
elif abspath.startswith(model_path):
name = abspath.replace(model_path, '')
else:
name = os.path.basename(path)
if name.startswith("\\") or name.startswith("/"):
name = name[1:]
shortname = os.path.splitext(name.replace("/", "_").replace("\\", "_"))[0]
return f'{name} [{shorthash}]', shortname
cmd_ckpt = shared.cmd_opts.ckpt
if os.path.exists(cmd_ckpt):
h = model_hash(cmd_ckpt)
title, short_model_name = modeltitle(cmd_ckpt, h)
checkpoints_list[title] = CheckpointInfo(cmd_ckpt, title, h, short_model_name, shared.cmd_opts.config)
shared.opts.data['sd_model_checkpoint'] = title
elif cmd_ckpt is not None and cmd_ckpt != shared.default_sd_model_file:
print(f"Checkpoint in --ckpt argument not found (Possible it was moved to {model_path}: {cmd_ckpt}", file=sys.stderr)
for filename in model_list:
h = model_hash(filename)
title, short_model_name = modeltitle(filename, h)
basename, _ = os.path.splitext(filename)
config = basename + ".yaml"
if not os.path.exists(config):
config = shared.cmd_opts.config
checkpoints_list[title] = CheckpointInfo(filename, title, h, short_model_name, config)
def get_closet_checkpoint_match(searchString):
applicable = sorted([info for info in checkpoints_list.values() if searchString in info.title], key = lambda x:len(x.title))
if len(applicable) > 0:
return applicable[0]
return None
def model_hash(filename):
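    # cheap short hash: sha256 over a 64 KiB slice starting at offset 0x100000, truncated to 8 hex characters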
try:
with open(filename, "rb") as file:
import hashlib
m = hashlib.sha256()
file.seek(0x100000)
m.update(file.read(0x10000))
return m.hexdigest()[0:8]
except FileNotFoundError:
return 'NOFILE'
def select_checkpoint():
model_checkpoint = shared.opts.sd_model_checkpoint
checkpoint_info = checkpoints_list.get(model_checkpoint, None)
if checkpoint_info is not None:
return checkpoint_info
if len(checkpoints_list) == 0:
print(f"No checkpoints found. When searching for checkpoints, looked at:", file=sys.stderr)
if shared.cmd_opts.ckpt is not None:
print(f" - file {os.path.abspath(shared.cmd_opts.ckpt)}", file=sys.stderr)
print(f" - directory {model_path}", file=sys.stderr)
if shared.cmd_opts.ckpt_dir is not None:
print(f" - directory {os.path.abspath(shared.cmd_opts.ckpt_dir)}", file=sys.stderr)
print(f"Can't run without a checkpoint. Find and place a .ckpt file into any of those locations. The program will exit.", file=sys.stderr)
exit(1)
checkpoint_info = next(iter(checkpoints_list.values()))
if model_checkpoint is not None:
print(f"Checkpoint {model_checkpoint} not found; loading fallback {checkpoint_info.title}", file=sys.stderr)
return checkpoint_info
def get_state_dict_from_checkpoint(pl_sd):
if "state_dict" in pl_sd:
return pl_sd["state_dict"]
return pl_sd
def load_model_weights(model, checkpoint_info):
checkpoint_file = checkpoint_info.filename
sd_model_hash = checkpoint_info.hash
print(f"Loading weights [{sd_model_hash}] from {checkpoint_file}")
pl_sd = torch.load(checkpoint_file, map_location="cpu")
if "global_step" in pl_sd:
print(f"Global Step: {pl_sd['global_step']}")
sd = get_state_dict_from_checkpoint(pl_sd)
model.load_state_dict(sd, strict=False)
if shared.cmd_opts.opt_channelslast:
model.to(memory_format=torch.channels_last)
if not shared.cmd_opts.no_half:
model.half()
devices.dtype = torch.float32 if shared.cmd_opts.no_half else torch.float16
vae_file = os.path.splitext(checkpoint_file)[0] + ".vae.pt"
if os.path.exists(vae_file):
print(f"Loading VAE weights from: {vae_file}")
vae_ckpt = torch.load(vae_file, map_location="cpu")
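        # keep only the autoencoder weights; 'loss.*' entries are training-time loss/discriminator parameters not needed for inference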
vae_dict = {k: v for k, v in vae_ckpt["state_dict"].items() if k[0:4] != "loss"}
model.first_stage_model.load_state_dict(vae_dict)
model.sd_model_vae_name = Path(vae_file).stem
model.sd_model_hash = sd_model_hash
model.sd_model_checkpoint = checkpoint_file
model.sd_checkpoint_info = checkpoint_info
def load_model():
from modules import lowvram, sd_hijack
checkpoint_info = select_checkpoint()
if checkpoint_info.config != shared.cmd_opts.config:
print(f"Loading config from: {checkpoint_info.config}")
sd_config = OmegaConf.load(checkpoint_info.config)
sd_model = instantiate_from_config(sd_config.model)
load_model_weights(sd_model, checkpoint_info)
if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
lowvram.setup_for_low_vram(sd_model, shared.cmd_opts.medvram)
else:
sd_model.to(shared.device)
sd_hijack.model_hijack.hijack(sd_model)
sd_model.eval()
print(f"Model loaded.")
return sd_model
def reload_model_weights(sd_model, info=None):
from modules import lowvram, devices, sd_hijack
checkpoint_info = info or select_checkpoint()
if sd_model.sd_model_checkpoint == checkpoint_info.filename:
return
if sd_model.sd_checkpoint_info.config != checkpoint_info.config:
return load_model()
if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
lowvram.send_everything_to_cpu()
else:
sd_model.to(devices.cpu)
sd_hijack.model_hijack.undo_hijack(sd_model)
load_model_weights(sd_model, checkpoint_info)
sd_hijack.model_hijack.hijack(sd_model)
if not shared.cmd_opts.lowvram and not shared.cmd_opts.medvram:
sd_model.to(devices.device)
print(f"Weights loaded.")
return sd_model
<|code_end|>
modules/shared.py
<|code_start|>import argparse
import datetime
import json
import os
import sys
import gradio as gr
import tqdm
import modules.artists
import modules.interrogate
import modules.memmon
import modules.sd_models
import modules.styles
import modules.devices as devices
from modules import sd_samplers, hypernetwork
from modules.paths import models_path, script_path, sd_path
sd_model_file = os.path.join(script_path, 'model.ckpt')
default_sd_model_file = sd_model_file
parser = argparse.ArgumentParser()
parser.add_argument("--config", type=str, default=os.path.join(sd_path, "configs/stable-diffusion/v1-inference.yaml"), help="path to config which constructs model",)
parser.add_argument("--ckpt", type=str, default=sd_model_file, help="path to checkpoint of stable diffusion model; if specified, this checkpoint will be added to the list of checkpoints and loaded",)
parser.add_argument("--ckpt-dir", type=str, default=None, help="Path to directory with stable diffusion checkpoints")
parser.add_argument("--gfpgan-dir", type=str, help="GFPGAN directory", default=('./src/gfpgan' if os.path.exists('./src/gfpgan') else './GFPGAN'))
parser.add_argument("--gfpgan-model", type=str, help="GFPGAN model file name", default=None)
parser.add_argument("--no-half", action='store_true', help="do not switch the model to 16-bit floats")
parser.add_argument("--no-progressbar-hiding", action='store_true', help="do not hide progressbar in gradio UI (we hide it because it slows down ML if you have hardware acceleration in browser)")
parser.add_argument("--max-batch-count", type=int, default=16, help="maximum batch count value for the UI")
parser.add_argument("--embeddings-dir", type=str, default=os.path.join(script_path, 'embeddings'), help="embeddings directory for textual inversion (default: embeddings)")
parser.add_argument("--allow-code", action='store_true', help="allow custom script execution from webui")
parser.add_argument("--medvram", action='store_true', help="enable stable diffusion model optimizations for sacrificing a little speed for low VRM usage")
parser.add_argument("--lowvram", action='store_true', help="enable stable diffusion model optimizations for sacrificing a lot of speed for very low VRM usage")
parser.add_argument("--always-batch-cond-uncond", action='store_true', help="disables cond/uncond batching that is enabled to save memory with --medvram or --lowvram")
parser.add_argument("--unload-gfpgan", action='store_true', help="does not do anything.")
parser.add_argument("--precision", type=str, help="evaluate at this precision", choices=["full", "autocast"], default="autocast")
parser.add_argument("--share", action='store_true', help="use share=True for gradio and make the UI accessible through their site (doesn't work for me but you might have better luck)")
parser.add_argument("--codeformer-models-path", type=str, help="Path to directory with codeformer model file(s).", default=os.path.join(models_path, 'Codeformer'))
parser.add_argument("--gfpgan-models-path", type=str, help="Path to directory with GFPGAN model file(s).", default=os.path.join(models_path, 'GFPGAN'))
parser.add_argument("--esrgan-models-path", type=str, help="Path to directory with ESRGAN model file(s).", default=os.path.join(models_path, 'ESRGAN'))
parser.add_argument("--bsrgan-models-path", type=str, help="Path to directory with BSRGAN model file(s).", default=os.path.join(models_path, 'BSRGAN'))
parser.add_argument("--realesrgan-models-path", type=str, help="Path to directory with RealESRGAN model file(s).", default=os.path.join(models_path, 'RealESRGAN'))
parser.add_argument("--scunet-models-path", type=str, help="Path to directory with ScuNET model file(s).", default=os.path.join(models_path, 'ScuNET'))
parser.add_argument("--swinir-models-path", type=str, help="Path to directory with SwinIR model file(s).", default=os.path.join(models_path, 'SwinIR'))
parser.add_argument("--ldsr-models-path", type=str, help="Path to directory with LDSR model file(s).", default=os.path.join(models_path, 'LDSR'))
parser.add_argument("--xformers", action='store_true', help="enable xformers for cross attention layers")
parser.add_argument("--force-enable-xformers", action='store_true', help="enable xformers for cross attention layers regardless of whether the checking code thinks you can run it; do not make bug reports if this fails to work")
parser.add_argument("--deepdanbooru", action='store_true', help="enable deepdanbooru interrogator")
parser.add_argument("--opt-split-attention", action='store_true', help="force-enables cross-attention layer optimization. By default, it's on for torch.cuda and off for other torch devices.")
parser.add_argument("--disable-opt-split-attention", action='store_true', help="force-disables cross-attention layer optimization")
parser.add_argument("--opt-split-attention-v1", action='store_true', help="enable older version of split attention optimization that does not consume all the VRAM it can find")
parser.add_argument("--use-cpu", nargs='+',choices=['SD', 'GFPGAN', 'BSRGAN', 'ESRGAN', 'SCUNet', 'CodeFormer'], help="use CPU as torch device for specified modules", default=[])
parser.add_argument("--listen", action='store_true', help="launch gradio with 0.0.0.0 as server name, allowing to respond to network requests")
parser.add_argument("--port", type=int, help="launch gradio with given server port, you need root/admin rights for ports < 1024, defaults to 7860 if available", default=None)
parser.add_argument("--show-negative-prompt", action='store_true', help="does not do anything", default=False)
parser.add_argument("--ui-config-file", type=str, help="filename to use for ui configuration", default=os.path.join(script_path, 'ui-config.json'))
parser.add_argument("--hide-ui-dir-config", action='store_true', help="hide directory configuration from webui", default=False)
parser.add_argument("--ui-settings-file", type=str, help="filename to use for ui settings", default=os.path.join(script_path, 'config.json'))
parser.add_argument("--gradio-debug", action='store_true', help="launch gradio with --debug option")
parser.add_argument("--gradio-auth", type=str, help='set gradio authentication like "username:password"; or comma-delimit multiple like "u1:p1,u2:p2,u3:p3"', default=None)
parser.add_argument("--gradio-img2img-tool", type=str, help='gradio image uploader tool: can be either editor for ctopping, or color-sketch for drawing', choices=["color-sketch", "editor"], default="editor")
parser.add_argument("--opt-channelslast", action='store_true', help="change memory type for stable diffusion to channels last")
parser.add_argument("--styles-file", type=str, help="filename to use for styles", default=os.path.join(script_path, 'styles.csv'))
parser.add_argument("--autolaunch", action='store_true', help="open the webui URL in the system's default browser upon launch", default=False)
parser.add_argument("--use-textbox-seed", action='store_true', help="use textbox for seeds in UI (no up/down, but possible to input long seeds)", default=False)
parser.add_argument("--disable-console-progressbars", action='store_true', help="do not output progressbars to console", default=False)
parser.add_argument("--enable-console-prompts", action='store_true', help="print prompts to console when generating with txt2img and img2img", default=False)
cmd_opts = parser.parse_args()
devices.device, devices.device_gfpgan, devices.device_bsrgan, devices.device_esrgan, devices.device_scunet, devices.device_codeformer = \
(devices.cpu if x in cmd_opts.use_cpu else devices.get_optimal_device() for x in ['SD', 'GFPGAN', 'BSRGAN', 'ESRGAN', 'SCUNet', 'CodeFormer'])
device = devices.device
batch_cond_uncond = cmd_opts.always_batch_cond_uncond or not (cmd_opts.lowvram or cmd_opts.medvram)
parallel_processing_allowed = not cmd_opts.lowvram and not cmd_opts.medvram
xformers_available = False
config_filename = cmd_opts.ui_settings_file
hypernetworks = hypernetwork.list_hypernetworks(os.path.join(models_path, 'hypernetworks'))
loaded_hypernetwork = None
class State:
skipped = False
interrupted = False
job = ""
job_no = 0
job_count = 0
job_timestamp = '0'
sampling_step = 0
sampling_steps = 0
current_latent = None
current_image = None
current_image_sampling_step = 0
textinfo = None
def skip(self):
self.skipped = True
def interrupt(self):
self.interrupted = True
def nextjob(self):
self.job_no += 1
self.sampling_step = 0
self.current_image_sampling_step = 0
def get_job_timestamp(self):
return datetime.datetime.now().strftime("%Y%m%d%H%M%S") # shouldn't this return job_timestamp?
state = State()
artist_db = modules.artists.ArtistsDatabase(os.path.join(script_path, 'artists.csv'))
styles_filename = cmd_opts.styles_file
prompt_styles = modules.styles.StyleDatabase(styles_filename)
interrogator = modules.interrogate.InterrogateModels("interrogate")
face_restorers = []
def realesrgan_models_names():
import modules.realesrgan_model
return [x.name for x in modules.realesrgan_model.get_realesrgan_models(None)]
class OptionInfo:
def __init__(self, default=None, label="", component=None, component_args=None, onchange=None):
self.default = default
self.label = label
self.component = component
self.component_args = component_args
self.onchange = onchange
self.section = None
def options_section(section_identifier, options_dict):
for k, v in options_dict.items():
v.section = section_identifier
return options_dict
hide_dirs = {"visible": not cmd_opts.hide_ui_dir_config}
options_templates = {}
options_templates.update(options_section(('saving-images', "Saving images/grids"), {
"samples_save": OptionInfo(True, "Always save all generated images"),
"samples_format": OptionInfo('png', 'File format for images'),
"samples_filename_pattern": OptionInfo("", "Images filename pattern"),
"grid_save": OptionInfo(True, "Always save all generated image grids"),
"grid_format": OptionInfo('png', 'File format for grids'),
"grid_extended_filename": OptionInfo(False, "Add extended info (seed, prompt) to filename when saving grid"),
"grid_only_if_multiple": OptionInfo(True, "Do not save grids consisting of one picture"),
"n_rows": OptionInfo(-1, "Grid row count; use -1 for autodetect and 0 for it to be same as batch size", gr.Slider, {"minimum": -1, "maximum": 16, "step": 1}),
"enable_pnginfo": OptionInfo(True, "Save text information about generation parameters as chunks to png files"),
"save_txt": OptionInfo(False, "Create a text file next to every image with generation parameters."),
"save_images_before_face_restoration": OptionInfo(False, "Save a copy of image before doing face restoration."),
"jpeg_quality": OptionInfo(80, "Quality for saved jpeg images", gr.Slider, {"minimum": 1, "maximum": 100, "step": 1}),
"export_for_4chan": OptionInfo(True, "If PNG image is larger than 4MB or any dimension is larger than 4000, downscale and save copy as JPG"),
"use_original_name_batch": OptionInfo(False, "Use original name for output filename during batch process in extras tab"),
"save_selected_only": OptionInfo(True, "When using 'Save' button, only save a single selected image"),
}))
options_templates.update(options_section(('saving-paths', "Paths for saving"), {
"outdir_samples": OptionInfo("", "Output directory for images; if empty, defaults to three directories below", component_args=hide_dirs),
"outdir_txt2img_samples": OptionInfo("outputs/txt2img-images", 'Output directory for txt2img images', component_args=hide_dirs),
"outdir_img2img_samples": OptionInfo("outputs/img2img-images", 'Output directory for img2img images', component_args=hide_dirs),
"outdir_extras_samples": OptionInfo("outputs/extras-images", 'Output directory for images from extras tab', component_args=hide_dirs),
"outdir_grids": OptionInfo("", "Output directory for grids; if empty, defaults to two directories below", component_args=hide_dirs),
"outdir_txt2img_grids": OptionInfo("outputs/txt2img-grids", 'Output directory for txt2img grids', component_args=hide_dirs),
"outdir_img2img_grids": OptionInfo("outputs/img2img-grids", 'Output directory for img2img grids', component_args=hide_dirs),
"outdir_save": OptionInfo("log/images", "Directory for saving images using the Save button", component_args=hide_dirs),
}))
options_templates.update(options_section(('saving-to-dirs', "Saving to a directory"), {
"save_to_dirs": OptionInfo(False, "Save images to a subdirectory"),
"grid_save_to_dirs": OptionInfo(False, "Save grids to a subdirectory"),
"use_save_to_dirs_for_ui": OptionInfo(False, "When using \"Save\" button, save images to a subdirectory"),
"directories_filename_pattern": OptionInfo("", "Directory name pattern"),
"directories_max_prompt_words": OptionInfo(8, "Max prompt words for [prompt_words] pattern", gr.Slider, {"minimum": 1, "maximum": 20, "step": 1}),
}))
options_templates.update(options_section(('upscaling', "Upscaling"), {
"ESRGAN_tile": OptionInfo(192, "Tile size for ESRGAN upscalers. 0 = no tiling.", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}),
"ESRGAN_tile_overlap": OptionInfo(8, "Tile overlap, in pixels for ESRGAN upscalers. Low values = visible seam.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}),
"realesrgan_enabled_models": OptionInfo(["R-ESRGAN x4+", "R-ESRGAN x4+ Anime6B"], "Select which Real-ESRGAN models to show in the web UI. (Requires restart)", gr.CheckboxGroup, lambda: {"choices": realesrgan_models_names()}),
"SWIN_tile": OptionInfo(192, "Tile size for all SwinIR.", gr.Slider, {"minimum": 16, "maximum": 512, "step": 16}),
"SWIN_tile_overlap": OptionInfo(8, "Tile overlap, in pixels for SwinIR. Low values = visible seam.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}),
"ldsr_steps": OptionInfo(100, "LDSR processing steps. Lower = faster", gr.Slider, {"minimum": 1, "maximum": 200, "step": 1}),
"upscaler_for_img2img": OptionInfo(None, "Upscaler for img2img", gr.Dropdown, lambda: {"choices": [x.name for x in sd_upscalers]}),
}))
options_templates.update(options_section(('face-restoration', "Face restoration"), {
"face_restoration_model": OptionInfo(None, "Face restoration model", gr.Radio, lambda: {"choices": [x.name() for x in face_restorers]}),
"code_former_weight": OptionInfo(0.5, "CodeFormer weight parameter; 0 = maximum effect; 1 = minimum effect", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}),
"face_restoration_unload": OptionInfo(False, "Move face restoration model from VRAM into RAM after processing"),
}))
options_templates.update(options_section(('system', "System"), {
"memmon_poll_rate": OptionInfo(8, "VRAM usage polls per second during generation. Set to 0 to disable.", gr.Slider, {"minimum": 0, "maximum": 40, "step": 1}),
"samples_log_stdout": OptionInfo(False, "Always print all generation info to standard output"),
"multiple_tqdm": OptionInfo(True, "Add a second progress bar to the console that shows progress for an entire job."),
}))
options_templates.update(options_section(('sd', "Stable Diffusion"), {
"sd_model_checkpoint": OptionInfo(None, "Stable Diffusion checkpoint", gr.Dropdown, lambda: {"choices": modules.sd_models.checkpoint_tiles()}),
"sd_hypernetwork": OptionInfo("None", "Stable Diffusion finetune hypernetwork", gr.Dropdown, lambda: {"choices": ["None"] + [x for x in hypernetworks.keys()]}),
"img2img_color_correction": OptionInfo(False, "Apply color correction to img2img results to match original colors."),
"save_images_before_color_correction": OptionInfo(False, "Save a copy of image before applying color correction to img2img results"),
"img2img_fix_steps": OptionInfo(False, "With img2img, do exactly the amount of steps the slider specifies (normally you'd do less with less denoising)."),
"enable_quantization": OptionInfo(False, "Enable quantization in K samplers for sharper and cleaner results. This may change existing seeds. Requires restart to apply."),
"enable_emphasis": OptionInfo(True, "Emphasis: use (text) to make model pay more attention to text and [text] to make it pay less attention"),
"use_old_emphasis_implementation": OptionInfo(False, "Use old emphasis implementation. Can be useful to reproduce old seeds."),
"enable_batch_seeds": OptionInfo(True, "Make K-diffusion samplers produce same images in a batch as when making a single image"),
"filter_nsfw": OptionInfo(False, "Filter NSFW content"),
'CLIP_ignore_last_layers': OptionInfo(0, "Ignore last layers of CLIP model", gr.Slider, {"minimum": 0, "maximum": 5, "step": 1}),
"random_artist_categories": OptionInfo([], "Allowed categories for random artists selection when using the Roll button", gr.CheckboxGroup, {"choices": artist_db.categories()}),
}))
options_templates.update(options_section(('interrogate', "Interrogate Options"), {
"interrogate_keep_models_in_memory": OptionInfo(False, "Interrogate: keep models in VRAM"),
"interrogate_use_builtin_artists": OptionInfo(True, "Interrogate: use artists from artists.csv"),
"interrogate_clip_num_beams": OptionInfo(1, "Interrogate: num_beams for BLIP", gr.Slider, {"minimum": 1, "maximum": 16, "step": 1}),
"interrogate_clip_min_length": OptionInfo(24, "Interrogate: minimum description length (excluding artists, etc..)", gr.Slider, {"minimum": 1, "maximum": 128, "step": 1}),
"interrogate_clip_max_length": OptionInfo(48, "Interrogate: maximum description length", gr.Slider, {"minimum": 1, "maximum": 256, "step": 1}),
"interrogate_clip_dict_limit": OptionInfo(1500, "Interrogate: maximum number of lines in text file (0 = No limit)"),
}))
options_templates.update(options_section(('ui', "User interface"), {
"show_progressbar": OptionInfo(True, "Show progressbar"),
"show_progress_every_n_steps": OptionInfo(0, "Show image creation progress every N sampling steps. Set 0 to disable.", gr.Slider, {"minimum": 0, "maximum": 32, "step": 1}),
"return_grid": OptionInfo(True, "Show grid in results for web"),
"do_not_show_images": OptionInfo(False, "Do not show any images in results for web"),
"add_model_hash_to_info": OptionInfo(True, "Add model hash to generation information"),
"add_extended_model_details_to_info": OptionInfo(False, "Add extended model details to generation information (model name, VAE, hypernetwork)"),
"font": OptionInfo("", "Font for image grids that have text"),
"js_modal_lightbox": OptionInfo(True, "Enable full page image viewer"),
"js_modal_lightbox_initially_zoomed": OptionInfo(True, "Show images zoomed in by default in full page image viewer"),
"show_progress_in_title": OptionInfo(True, "Show generation progress in window title."),
}))
options_templates.update(options_section(('sampler-params', "Sampler parameters"), {
"hide_samplers": OptionInfo([], "Hide samplers in user interface (requires restart)", gr.CheckboxGroup, lambda: {"choices": [x.name for x in sd_samplers.all_samplers]}),
"eta_ddim": OptionInfo(0.0, "eta (noise multiplier) for DDIM", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
"eta_ancestral": OptionInfo(1.0, "eta (noise multiplier) for ancestral samplers", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
"ddim_discretize": OptionInfo('uniform', "img2img DDIM discretize", gr.Radio, {"choices": ['uniform', 'quad']}),
's_churn': OptionInfo(0.0, "sigma churn", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
's_tmin': OptionInfo(0.0, "sigma tmin", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
's_noise': OptionInfo(1.0, "sigma noise", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
}))
class Options:
data = None
data_labels = options_templates
typemap = {int: float}
def __init__(self):
self.data = {k: v.default for k, v in self.data_labels.items()}
def __setattr__(self, key, value):
if self.data is not None:
if key in self.data:
self.data[key] = value
return super(Options, self).__setattr__(key, value)
def __getattr__(self, item):
if self.data is not None:
if item in self.data:
return self.data[item]
if item in self.data_labels:
return self.data_labels[item].default
return super(Options, self).__getattribute__(item)
def save(self, filename):
with open(filename, "w", encoding="utf8") as file:
json.dump(self.data, file)
def same_type(self, x, y):
if x is None or y is None:
return True
type_x = self.typemap.get(type(x), type(x))
type_y = self.typemap.get(type(y), type(y))
return type_x == type_y
def load(self, filename):
with open(filename, "r", encoding="utf8") as file:
self.data = json.load(file)
bad_settings = 0
for k, v in self.data.items():
info = self.data_labels.get(k, None)
if info is not None and not self.same_type(info.default, v):
print(f"Warning: bad setting value: {k}: {v} ({type(v).__name__}; expected {type(info.default).__name__})", file=sys.stderr)
bad_settings += 1
if bad_settings > 0:
print(f"The program is likely to not work with bad settings.\nSettings file: {filename}\nEither fix the file, or delete it and restart.", file=sys.stderr)
def onchange(self, key, func):
item = self.data_labels.get(key)
item.onchange = func
def dumpjson(self):
d = {k: self.data.get(k, self.data_labels.get(k).default) for k in self.data_labels.keys()}
return json.dumps(d)
opts = Options()
if os.path.exists(config_filename):
opts.load(config_filename)
sd_upscalers = []
sd_model = None
progress_print_out = sys.stdout
class TotalTQDM:
def __init__(self):
self._tqdm = None
def reset(self):
self._tqdm = tqdm.tqdm(
desc="Total progress",
total=state.job_count * state.sampling_steps,
position=1,
file=progress_print_out
)
def update(self):
if not opts.multiple_tqdm or cmd_opts.disable_console_progressbars:
return
if self._tqdm is None:
self.reset()
self._tqdm.update()
def updateTotal(self, new_total):
if not opts.multiple_tqdm or cmd_opts.disable_console_progressbars:
return
if self._tqdm is None:
self.reset()
self._tqdm.total=new_total
def clear(self):
if self._tqdm is not None:
self._tqdm.close()
self._tqdm = None
total_tqdm = TotalTQDM()
mem_mon = modules.memmon.MemUsageMonitor("MemMon", device, opts)
mem_mon.start()
<|code_end|>
|
Uncapped token limit does not work past the ~85th token
**Describe the bug**
Tokens past ~85 do not really affect the generated image in an expected or consistent way. Tokens past ~90 do not do anything (except add noise). It would be nice if someone could check on NovelAI (which has a cap of ~200 tokens) whether the behavior is the same.
**To Reproduce**
I have generated an image with the same seed multiple times, with the text `green eyes` at different token positions and with varying attention. Commas are used as unobtrusive padding to reach the nth token without changing the image in any significant way. The end of the prompt is also padded up to the 99th token.
All images are generated with: Steps: 20, Sampler: Euler, CFG scale: 11, Seed: 2347389309, Size: 512x640, Model hash: 925997e9, Clip skip: 2
Negative prompt: `lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry, artist name`
Using a certain trendy model (final-pruned) with its VAE and no hypernetworks. This model has been demonstrated to support up to ~200 tokens on the premium UI.
With `green eyes` at the:
- **76**th: `masterpiece, 1girl, cute, , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , green eyes, , , , , , , , , , , , , , , , , , , , , ,` (pass)

- **81**st: `masterpiece, 1girl, cute, , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , green eyes , , , , , , , , , , , , , , , , ,` (fail)

- Still **81**st (x1.5 attn): `masterpiece, 1girl, cute, , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , (green eyes:1.5), , , , , , , , , , , , , , , , ,` (pass, attention helped)

- **86**th (x1.5 attn): `masterpiece, 1girl, cute, , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , (green eyes:1.5), , , , , , , , , , , ,` (fail, eyes are no longer green)

- Still **86**th (x2.0 attn): `masterpiece, 1girl, cute, , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , (green eyes:2.0), , , , , , , , , , , ,` (fail, increasing multiplier did not make the eyes green)

- Still **86**th (x2.5 attn): `masterpiece, 1girl, cute, , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , (green eyes:2.5), , , , , , , , , , , ,` (fail, increasing any more would just destroy the image)

- **91**st (x2.5 attn): `masterpiece, 1girl, cute, , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , (green eyes:2.5), , , , , , ,` (fail, no green at all now)

**Additional context**
I've only demonstrated with 1 seed here, but the behavior is consistent with any seed.
Commas are used as padding here, but the behavior is the same with normal words/tags.
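For context, the hard limit here appears to be CLIP's 77-token context window (75 prompt tokens plus the BOS/EOS markers), so anything past one window has to be encoded in separate chunks and concatenated. A minimal sketch of that chunking idea, assuming a hypothetical `encode_chunk` helper standing in for one CLIP text-encoder call (not real webui API):
```python
import torch

CHUNK = 75  # prompt tokens per CLIP window, excluding BOS/EOS

def encode_long_prompt(token_ids, multipliers, encode_chunk, pad_id):
    """Encode a prompt longer than one window by chunking and concatenating.

    `encode_chunk(tokens, mults)` is assumed to return a (1, 77, dim) tensor
    for one padded 75-token chunk; it is a stand-in, not an actual webui call.
    """
    zs = []
    for start in range(0, max(len(token_ids), 1), CHUNK):
        chunk = token_ids[start:start + CHUNK]
        mults = multipliers[start:start + CHUNK]
        pad = CHUNK - len(chunk)  # pad the last chunk to a full window
        zs.append(encode_chunk(chunk + [pad_id] * pad, mults + [1.0] * pad))
    return torch.cat(zs, dim=-2)  # concatenate along the token axis
```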
|
modules/sd_hijack.py
<|code_start|>import math
import os
import sys
import traceback
import torch
import numpy as np
from torch import einsum
from torch.nn.functional import silu
import modules.textual_inversion.textual_inversion
from modules import prompt_parser, devices, sd_hijack_optimizations, shared, hypernetwork
from modules.shared import opts, device, cmd_opts
import ldm.modules.attention
import ldm.modules.diffusionmodules.model
attention_CrossAttention_forward = ldm.modules.attention.CrossAttention.forward
diffusionmodules_model_nonlinearity = ldm.modules.diffusionmodules.model.nonlinearity
diffusionmodules_model_AttnBlock_forward = ldm.modules.diffusionmodules.model.AttnBlock.forward
def apply_optimizations():
undo_optimizations()
ldm.modules.diffusionmodules.model.nonlinearity = silu
if cmd_opts.force_enable_xformers or (cmd_opts.xformers and shared.xformers_available and torch.version.cuda and torch.cuda.get_device_capability(shared.device) == (8, 6)):
print("Applying xformers cross attention optimization.")
ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.xformers_attention_forward
ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.xformers_attnblock_forward
elif cmd_opts.opt_split_attention_v1:
print("Applying v1 cross attention optimization.")
ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward_v1
elif not cmd_opts.disable_opt_split_attention and (cmd_opts.opt_split_attention or torch.cuda.is_available()):
print("Applying cross attention optimization.")
ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward
ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.cross_attention_attnblock_forward
def undo_optimizations():
ldm.modules.attention.CrossAttention.forward = hypernetwork.attention_CrossAttention_forward
ldm.modules.diffusionmodules.model.nonlinearity = diffusionmodules_model_nonlinearity
ldm.modules.diffusionmodules.model.AttnBlock.forward = diffusionmodules_model_AttnBlock_forward
def get_target_prompt_token_count(token_count):
if token_count < 75:
return 75
return math.ceil(token_count / 10) * 10
class StableDiffusionModelHijack:
fixes = None
comments = []
layers = None
circular_enabled = False
clip = None
embedding_db = modules.textual_inversion.textual_inversion.EmbeddingDatabase(cmd_opts.embeddings_dir)
def hijack(self, m):
model_embeddings = m.cond_stage_model.transformer.text_model.embeddings
model_embeddings.token_embedding = EmbeddingsWithFixes(model_embeddings.token_embedding, self)
m.cond_stage_model = FrozenCLIPEmbedderWithCustomWords(m.cond_stage_model, self)
self.clip = m.cond_stage_model
apply_optimizations()
def flatten(el):
flattened = [flatten(children) for children in el.children()]
res = [el]
for c in flattened:
res += c
return res
self.layers = flatten(m)
def undo_hijack(self, m):
if type(m.cond_stage_model) == FrozenCLIPEmbedderWithCustomWords:
m.cond_stage_model = m.cond_stage_model.wrapped
model_embeddings = m.cond_stage_model.transformer.text_model.embeddings
if type(model_embeddings.token_embedding) == EmbeddingsWithFixes:
model_embeddings.token_embedding = model_embeddings.token_embedding.wrapped
def apply_circular(self, enable):
if self.circular_enabled == enable:
return
self.circular_enabled = enable
for layer in [layer for layer in self.layers if type(layer) == torch.nn.Conv2d]:
layer.padding_mode = 'circular' if enable else 'zeros'
def clear_comments(self):
self.comments = []
def tokenize(self, text):
_, remade_batch_tokens, _, _, _, token_count = self.clip.process_text([text])
return remade_batch_tokens[0], token_count, get_target_prompt_token_count(token_count)
class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module):
def __init__(self, wrapped, hijack):
super().__init__()
self.wrapped = wrapped
self.hijack: StableDiffusionModelHijack = hijack
self.tokenizer = wrapped.tokenizer
self.token_mults = {}
tokens_with_parens = [(k, v) for k, v in self.tokenizer.get_vocab().items() if '(' in k or ')' in k or '[' in k or ']' in k]
for text, ident in tokens_with_parens:
mult = 1.0
for c in text:
if c == '[':
mult /= 1.1
if c == ']':
mult *= 1.1
if c == '(':
mult *= 1.1
if c == ')':
mult /= 1.1
if mult != 1.0:
self.token_mults[ident] = mult
def tokenize_line(self, line, used_custom_terms, hijack_comments):
id_start = self.wrapped.tokenizer.bos_token_id
id_end = self.wrapped.tokenizer.eos_token_id
if opts.enable_emphasis:
parsed = prompt_parser.parse_prompt_attention(line)
else:
parsed = [[line, 1.0]]
tokenized = self.wrapped.tokenizer([text for text, _ in parsed], truncation=False, add_special_tokens=False)["input_ids"]
fixes = []
remade_tokens = []
multipliers = []
for tokens, (text, weight) in zip(tokenized, parsed):
i = 0
while i < len(tokens):
token = tokens[i]
embedding, embedding_length_in_tokens = self.hijack.embedding_db.find_embedding_at_position(tokens, i)
if embedding is None:
remade_tokens.append(token)
multipliers.append(weight)
i += 1
else:
emb_len = int(embedding.vec.shape[0])
fixes.append((len(remade_tokens), embedding))
remade_tokens += [0] * emb_len
multipliers += [weight] * emb_len
used_custom_terms.append((embedding.name, embedding.checksum()))
i += embedding_length_in_tokens
token_count = len(remade_tokens)
prompt_target_length = get_target_prompt_token_count(token_count)
tokens_to_add = prompt_target_length - len(remade_tokens) + 1
remade_tokens = [id_start] + remade_tokens + [id_end] * tokens_to_add
multipliers = [1.0] + multipliers + [1.0] * tokens_to_add
return remade_tokens, fixes, multipliers, token_count
def process_text(self, texts):
used_custom_terms = []
remade_batch_tokens = []
hijack_comments = []
hijack_fixes = []
token_count = 0
cache = {}
batch_multipliers = []
for line in texts:
if line in cache:
remade_tokens, fixes, multipliers = cache[line]
else:
remade_tokens, fixes, multipliers, current_token_count = self.tokenize_line(line, used_custom_terms, hijack_comments)
token_count = max(current_token_count, token_count)
cache[line] = (remade_tokens, fixes, multipliers)
remade_batch_tokens.append(remade_tokens)
hijack_fixes.append(fixes)
batch_multipliers.append(multipliers)
return batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count
def process_text_old(self, text):
id_start = self.wrapped.tokenizer.bos_token_id
id_end = self.wrapped.tokenizer.eos_token_id
maxlen = self.wrapped.max_length # you get to stay at 77
used_custom_terms = []
remade_batch_tokens = []
overflowing_words = []
hijack_comments = []
hijack_fixes = []
token_count = 0
cache = {}
batch_tokens = self.wrapped.tokenizer(text, truncation=False, add_special_tokens=False)["input_ids"]
batch_multipliers = []
for tokens in batch_tokens:
tuple_tokens = tuple(tokens)
if tuple_tokens in cache:
remade_tokens, fixes, multipliers = cache[tuple_tokens]
else:
fixes = []
remade_tokens = []
multipliers = []
mult = 1.0
i = 0
while i < len(tokens):
token = tokens[i]
embedding, embedding_length_in_tokens = self.hijack.embedding_db.find_embedding_at_position(tokens, i)
mult_change = self.token_mults.get(token) if opts.enable_emphasis else None
if mult_change is not None:
mult *= mult_change
i += 1
elif embedding is None:
remade_tokens.append(token)
multipliers.append(mult)
i += 1
else:
emb_len = int(embedding.vec.shape[0])
fixes.append((len(remade_tokens), embedding))
remade_tokens += [0] * emb_len
multipliers += [mult] * emb_len
used_custom_terms.append((embedding.name, embedding.checksum()))
i += embedding_length_in_tokens
if len(remade_tokens) > maxlen - 2:
vocab = {v: k for k, v in self.wrapped.tokenizer.get_vocab().items()}
ovf = remade_tokens[maxlen - 2:]
overflowing_words = [vocab.get(int(x), "") for x in ovf]
overflowing_text = self.wrapped.tokenizer.convert_tokens_to_string(''.join(overflowing_words))
hijack_comments.append(f"Warning: too many input tokens; some ({len(overflowing_words)}) have been truncated:\n{overflowing_text}\n")
token_count = len(remade_tokens)
remade_tokens = remade_tokens + [id_end] * (maxlen - 2 - len(remade_tokens))
remade_tokens = [id_start] + remade_tokens[0:maxlen-2] + [id_end]
cache[tuple_tokens] = (remade_tokens, fixes, multipliers)
multipliers = multipliers + [1.0] * (maxlen - 2 - len(multipliers))
multipliers = [1.0] + multipliers[0:maxlen - 2] + [1.0]
remade_batch_tokens.append(remade_tokens)
hijack_fixes.append(fixes)
batch_multipliers.append(multipliers)
return batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count
def forward(self, text):
if opts.use_old_emphasis_implementation:
batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count = self.process_text_old(text)
else:
batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count = self.process_text(text)
self.hijack.fixes = hijack_fixes
self.hijack.comments += hijack_comments
if len(used_custom_terms) > 0:
self.hijack.comments.append("Used embeddings: " + ", ".join([f'{word} [{checksum}]' for word, checksum in used_custom_terms]))
target_token_count = get_target_prompt_token_count(token_count) + 2
position_ids_array = [min(x, 75) for x in range(target_token_count-1)] + [76]
position_ids = torch.asarray(position_ids_array, device=devices.device).expand((1, -1))
remade_batch_tokens_of_same_length = [x + [self.wrapped.tokenizer.eos_token_id] * (target_token_count - len(x)) for x in remade_batch_tokens]
tokens = torch.asarray(remade_batch_tokens_of_same_length).to(device)
outputs = self.wrapped.transformer(input_ids=tokens, position_ids=position_ids, output_hidden_states=-opts.CLIP_stop_at_last_layers)
if opts.CLIP_stop_at_last_layers > 1:
z = outputs.hidden_states[-opts.CLIP_stop_at_last_layers]
z = self.wrapped.transformer.text_model.final_layer_norm(z)
else:
z = outputs.last_hidden_state
# restoring original mean is likely not correct, but it seems to work well to prevent artifacts that happen otherwise
batch_multipliers_of_same_length = [x + [1.0] * (target_token_count - len(x)) for x in batch_multipliers]
batch_multipliers = torch.asarray(batch_multipliers_of_same_length).to(device)
original_mean = z.mean()
z *= batch_multipliers.reshape(batch_multipliers.shape + (1,)).expand(z.shape)
new_mean = z.mean()
z *= original_mean / new_mean
return z
class EmbeddingsWithFixes(torch.nn.Module):
def __init__(self, wrapped, embeddings):
super().__init__()
self.wrapped = wrapped
self.embeddings = embeddings
def forward(self, input_ids):
batch_fixes = self.embeddings.fixes
self.embeddings.fixes = None
inputs_embeds = self.wrapped(input_ids)
if batch_fixes is None or len(batch_fixes) == 0 or max([len(x) for x in batch_fixes]) == 0:
return inputs_embeds
vecs = []
for fixes, tensor in zip(batch_fixes, inputs_embeds):
for offset, embedding in fixes:
emb = embedding.vec
emb_len = min(tensor.shape[0]-offset-1, emb.shape[0])
tensor = torch.cat([tensor[0:offset+1], emb[0:emb_len], tensor[offset+1+emb_len:]])
vecs.append(tensor)
return torch.stack(vecs)
def add_circular_option_to_conv_2d():
conv2d_constructor = torch.nn.Conv2d.__init__
def conv2d_constructor_circular(self, *args, **kwargs):
return conv2d_constructor(self, *args, padding_mode='circular', **kwargs)
torch.nn.Conv2d.__init__ = conv2d_constructor_circular
model_hijack = StableDiffusionModelHijack()
<|code_end|>
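A side note on the change above: `get_target_prompt_token_count` now pads the prompt length up to the next multiple of 75 (one chunk of prompt tokens) rather than the next multiple of 10. A quick standalone comparison of the two versions, copied from the diff rather than imported from the module:
```python
import math

def old_target(token_count):
    # behavior before the change
    return 75 if token_count < 75 else math.ceil(token_count / 10) * 10

def new_target(token_count):
    # behavior after the change
    return math.ceil(max(token_count, 1) / 75) * 75

print(old_target(80), new_target(80))    # 80 150
print(old_target(150), new_target(150))  # 150 150
```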
|
modules/sd_hijack.py
<|code_start|>import math
import os
import sys
import traceback
import torch
import numpy as np
from torch import einsum
from torch.nn.functional import silu
import modules.textual_inversion.textual_inversion
from modules import prompt_parser, devices, sd_hijack_optimizations, shared, hypernetwork
from modules.shared import opts, device, cmd_opts
import ldm.modules.attention
import ldm.modules.diffusionmodules.model
attention_CrossAttention_forward = ldm.modules.attention.CrossAttention.forward
diffusionmodules_model_nonlinearity = ldm.modules.diffusionmodules.model.nonlinearity
diffusionmodules_model_AttnBlock_forward = ldm.modules.diffusionmodules.model.AttnBlock.forward
def apply_optimizations():
undo_optimizations()
ldm.modules.diffusionmodules.model.nonlinearity = silu
if cmd_opts.force_enable_xformers or (cmd_opts.xformers and shared.xformers_available and torch.version.cuda and torch.cuda.get_device_capability(shared.device) == (8, 6)):
print("Applying xformers cross attention optimization.")
ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.xformers_attention_forward
ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.xformers_attnblock_forward
elif cmd_opts.opt_split_attention_v1:
print("Applying v1 cross attention optimization.")
ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward_v1
elif not cmd_opts.disable_opt_split_attention and (cmd_opts.opt_split_attention or torch.cuda.is_available()):
print("Applying cross attention optimization.")
ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward
ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.cross_attention_attnblock_forward
def undo_optimizations():
ldm.modules.attention.CrossAttention.forward = hypernetwork.attention_CrossAttention_forward
ldm.modules.diffusionmodules.model.nonlinearity = diffusionmodules_model_nonlinearity
ldm.modules.diffusionmodules.model.AttnBlock.forward = diffusionmodules_model_AttnBlock_forward
def get_target_prompt_token_count(token_count):
return math.ceil(max(token_count, 1) / 75) * 75
class StableDiffusionModelHijack:
fixes = None
comments = []
layers = None
circular_enabled = False
clip = None
embedding_db = modules.textual_inversion.textual_inversion.EmbeddingDatabase(cmd_opts.embeddings_dir)
def hijack(self, m):
model_embeddings = m.cond_stage_model.transformer.text_model.embeddings
model_embeddings.token_embedding = EmbeddingsWithFixes(model_embeddings.token_embedding, self)
m.cond_stage_model = FrozenCLIPEmbedderWithCustomWords(m.cond_stage_model, self)
self.clip = m.cond_stage_model
apply_optimizations()
def flatten(el):
flattened = [flatten(children) for children in el.children()]
res = [el]
for c in flattened:
res += c
return res
self.layers = flatten(m)
def undo_hijack(self, m):
if type(m.cond_stage_model) == FrozenCLIPEmbedderWithCustomWords:
m.cond_stage_model = m.cond_stage_model.wrapped
model_embeddings = m.cond_stage_model.transformer.text_model.embeddings
if type(model_embeddings.token_embedding) == EmbeddingsWithFixes:
model_embeddings.token_embedding = model_embeddings.token_embedding.wrapped
def apply_circular(self, enable):
if self.circular_enabled == enable:
return
self.circular_enabled = enable
for layer in [layer for layer in self.layers if type(layer) == torch.nn.Conv2d]:
layer.padding_mode = 'circular' if enable else 'zeros'
def clear_comments(self):
self.comments = []
def tokenize(self, text):
_, remade_batch_tokens, _, _, _, token_count = self.clip.process_text([text])
return remade_batch_tokens[0], token_count, get_target_prompt_token_count(token_count)
class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module):
def __init__(self, wrapped, hijack):
super().__init__()
self.wrapped = wrapped
self.hijack: StableDiffusionModelHijack = hijack
self.tokenizer = wrapped.tokenizer
self.token_mults = {}
tokens_with_parens = [(k, v) for k, v in self.tokenizer.get_vocab().items() if '(' in k or ')' in k or '[' in k or ']' in k]
for text, ident in tokens_with_parens:
mult = 1.0
for c in text:
if c == '[':
mult /= 1.1
if c == ']':
mult *= 1.1
if c == '(':
mult *= 1.1
if c == ')':
mult /= 1.1
if mult != 1.0:
self.token_mults[ident] = mult
def tokenize_line(self, line, used_custom_terms, hijack_comments):
id_end = self.wrapped.tokenizer.eos_token_id
if opts.enable_emphasis:
parsed = prompt_parser.parse_prompt_attention(line)
else:
parsed = [[line, 1.0]]
tokenized = self.wrapped.tokenizer([text for text, _ in parsed], truncation=False, add_special_tokens=False)["input_ids"]
fixes = []
remade_tokens = []
multipliers = []
for tokens, (text, weight) in zip(tokenized, parsed):
i = 0
while i < len(tokens):
token = tokens[i]
embedding, embedding_length_in_tokens = self.hijack.embedding_db.find_embedding_at_position(tokens, i)
if embedding is None:
remade_tokens.append(token)
multipliers.append(weight)
i += 1
else:
emb_len = int(embedding.vec.shape[0])
iteration = len(remade_tokens) // 75
if (len(remade_tokens) + emb_len) // 75 != iteration:
rem = (75 * (iteration + 1) - len(remade_tokens))
remade_tokens += [id_end] * rem
multipliers += [1.0] * rem
iteration += 1
fixes.append((iteration, (len(remade_tokens) % 75, embedding)))
remade_tokens += [0] * emb_len
multipliers += [weight] * emb_len
used_custom_terms.append((embedding.name, embedding.checksum()))
i += embedding_length_in_tokens
token_count = len(remade_tokens)
prompt_target_length = get_target_prompt_token_count(token_count)
tokens_to_add = prompt_target_length - len(remade_tokens)
remade_tokens = remade_tokens + [id_end] * tokens_to_add
multipliers = multipliers + [1.0] * tokens_to_add
return remade_tokens, fixes, multipliers, token_count
def process_text(self, texts):
used_custom_terms = []
remade_batch_tokens = []
hijack_comments = []
hijack_fixes = []
token_count = 0
cache = {}
batch_multipliers = []
for line in texts:
if line in cache:
remade_tokens, fixes, multipliers = cache[line]
else:
remade_tokens, fixes, multipliers, current_token_count = self.tokenize_line(line, used_custom_terms, hijack_comments)
token_count = max(current_token_count, token_count)
cache[line] = (remade_tokens, fixes, multipliers)
remade_batch_tokens.append(remade_tokens)
hijack_fixes.append(fixes)
batch_multipliers.append(multipliers)
return batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count
def process_text_old(self, text):
id_start = self.wrapped.tokenizer.bos_token_id
id_end = self.wrapped.tokenizer.eos_token_id
maxlen = self.wrapped.max_length # you get to stay at 77
used_custom_terms = []
remade_batch_tokens = []
overflowing_words = []
hijack_comments = []
hijack_fixes = []
token_count = 0
cache = {}
batch_tokens = self.wrapped.tokenizer(text, truncation=False, add_special_tokens=False)["input_ids"]
batch_multipliers = []
for tokens in batch_tokens:
tuple_tokens = tuple(tokens)
if tuple_tokens in cache:
remade_tokens, fixes, multipliers = cache[tuple_tokens]
else:
fixes = []
remade_tokens = []
multipliers = []
mult = 1.0
i = 0
while i < len(tokens):
token = tokens[i]
embedding, embedding_length_in_tokens = self.hijack.embedding_db.find_embedding_at_position(tokens, i)
mult_change = self.token_mults.get(token) if opts.enable_emphasis else None
if mult_change is not None:
mult *= mult_change
i += 1
elif embedding is None:
remade_tokens.append(token)
multipliers.append(mult)
i += 1
else:
emb_len = int(embedding.vec.shape[0])
fixes.append((len(remade_tokens), embedding))
remade_tokens += [0] * emb_len
multipliers += [mult] * emb_len
used_custom_terms.append((embedding.name, embedding.checksum()))
i += embedding_length_in_tokens
if len(remade_tokens) > maxlen - 2:
vocab = {v: k for k, v in self.wrapped.tokenizer.get_vocab().items()}
ovf = remade_tokens[maxlen - 2:]
overflowing_words = [vocab.get(int(x), "") for x in ovf]
overflowing_text = self.wrapped.tokenizer.convert_tokens_to_string(''.join(overflowing_words))
hijack_comments.append(f"Warning: too many input tokens; some ({len(overflowing_words)}) have been truncated:\n{overflowing_text}\n")
token_count = len(remade_tokens)
remade_tokens = remade_tokens + [id_end] * (maxlen - 2 - len(remade_tokens))
remade_tokens = [id_start] + remade_tokens[0:maxlen-2] + [id_end]
cache[tuple_tokens] = (remade_tokens, fixes, multipliers)
multipliers = multipliers + [1.0] * (maxlen - 2 - len(multipliers))
multipliers = [1.0] + multipliers[0:maxlen - 2] + [1.0]
remade_batch_tokens.append(remade_tokens)
hijack_fixes.append(fixes)
batch_multipliers.append(multipliers)
return batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count
def forward(self, text):
use_old = opts.use_old_emphasis_implementation
if use_old:
batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count = self.process_text_old(text)
else:
batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count = self.process_text(text)
self.hijack.comments += hijack_comments
if len(used_custom_terms) > 0:
self.hijack.comments.append("Used embeddings: " + ", ".join([f'{word} [{checksum}]' for word, checksum in used_custom_terms]))
if use_old:
self.hijack.fixes = hijack_fixes
return self.process_tokens(remade_batch_tokens, batch_multipliers)
z = None
i = 0
while max(map(len, remade_batch_tokens)) != 0:
rem_tokens = [x[75:] for x in remade_batch_tokens]
rem_multipliers = [x[75:] for x in batch_multipliers]
self.hijack.fixes = []
for unfiltered in hijack_fixes:
fixes = []
for fix in unfiltered:
if fix[0] == i:
fixes.append(fix[1])
self.hijack.fixes.append(fixes)
z1 = self.process_tokens([x[:75] for x in remade_batch_tokens], [x[:75] for x in batch_multipliers])
z = z1 if z is None else torch.cat((z, z1), axis=-2)
remade_batch_tokens = rem_tokens
batch_multipliers = rem_multipliers
i += 1
return z
def process_tokens(self, remade_batch_tokens, batch_multipliers):
if not opts.use_old_emphasis_implementation:
remade_batch_tokens = [[self.wrapped.tokenizer.bos_token_id] + x[:75] + [self.wrapped.tokenizer.eos_token_id] for x in remade_batch_tokens]
batch_multipliers = [[1.0] + x[:75] + [1.0] for x in batch_multipliers]
tokens = torch.asarray(remade_batch_tokens).to(device)
outputs = self.wrapped.transformer(input_ids=tokens, output_hidden_states=-opts.CLIP_stop_at_last_layers)
if opts.CLIP_stop_at_last_layers > 1:
z = outputs.hidden_states[-opts.CLIP_stop_at_last_layers]
z = self.wrapped.transformer.text_model.final_layer_norm(z)
else:
z = outputs.last_hidden_state
# restoring original mean is likely not correct, but it seems to work well to prevent artifacts that happen otherwise
batch_multipliers_of_same_length = [x + [1.0] * (75 - len(x)) for x in batch_multipliers]
batch_multipliers = torch.asarray(batch_multipliers_of_same_length).to(device)
original_mean = z.mean()
z *= batch_multipliers.reshape(batch_multipliers.shape + (1,)).expand(z.shape)
new_mean = z.mean()
z *= original_mean / new_mean
return z
class EmbeddingsWithFixes(torch.nn.Module):
def __init__(self, wrapped, embeddings):
super().__init__()
self.wrapped = wrapped
self.embeddings = embeddings
def forward(self, input_ids):
batch_fixes = self.embeddings.fixes
self.embeddings.fixes = None
inputs_embeds = self.wrapped(input_ids)
if batch_fixes is None or len(batch_fixes) == 0 or max([len(x) for x in batch_fixes]) == 0:
return inputs_embeds
vecs = []
for fixes, tensor in zip(batch_fixes, inputs_embeds):
for offset, embedding in fixes:
emb = embedding.vec
emb_len = min(tensor.shape[0]-offset-1, emb.shape[0])
tensor = torch.cat([tensor[0:offset+1], emb[0:emb_len], tensor[offset+1+emb_len:]])
vecs.append(tensor)
return torch.stack(vecs)
def add_circular_option_to_conv_2d():
conv2d_constructor = torch.nn.Conv2d.__init__
def conv2d_constructor_circular(self, *args, **kwargs):
return conv2d_constructor(self, *args, padding_mode='circular', **kwargs)
torch.nn.Conv2d.__init__ = conv2d_constructor_circular
model_hijack = StableDiffusionModelHijack()
<|code_end|>
|
Error if more than 75 tokens used
**Describe the bug**
When typing the prompt (txt2img) the number of tokens increases from 75 to 150, but it then errors when I try to generate images. It will run if the number of tokens is below 75.
> Traceback (most recent call last):
> File "F:\stable-diffusion-webui\modules\ui.py", line 182, in f
> res = list(func(*args, **kwargs))
> File "F:\stable-diffusion-webui\webui.py", line 69, in f
> res = func(*args, **kwargs)
> File "F:\stable-diffusion-webui\modules\txt2img.py", line 43, in txt2img
> processed = process_images(p)
> File "F:\stable-diffusion-webui\modules\processing.py", line 394, in process_images
> c = prompt_parser.get_multicond_learned_conditioning(shared.sd_model, prompts, p.steps)
> File "F:\stable-diffusion-webui\modules\prompt_parser.py", line 203, in get_multicond_learned_conditioning
> learned_conditioning = get_learned_conditioning(model, prompt_flat_list, steps)
> File "F:\stable-diffusion-webui\modules\prompt_parser.py", line 138, in get_learned_conditioning
> conds = model.get_learned_conditioning(texts)
> File "F:\stable-diffusion-webui\repositories\stable-diffusion\ldm\models\diffusion\ddpm.py", line 558, in get_learned_conditioning
> c = self.cond_stage_model(c)
> File "F:\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1130, in _call_impl
> return forward_call(*input, **kwargs)
> File "F:\stable-diffusion-webui\modules\sd_hijack.py", line 315, in forward
> z1 = self.process_tokens([x[:75] for x in remade_batch_tokens], [x[:75] for x in batch_multipliers])
> File "F:\stable-diffusion-webui\modules\sd_hijack.py", line 330, in process_tokens
> tokens = torch.asarray(remade_batch_tokens).to(device)
> ValueError: expected sequence of length 2 at dim 1 (got 77)
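That last `ValueError` is what `torch.asarray` raises when it is handed ragged (unequal-length) nested lists, for example one prompt chunked out to 77 tokens while another in the same batch still has only 2. A minimal reproduction outside the webui (an illustration, not the exact call site):
```python
import torch

# Two token lists of different lengths in the same batch.
ragged = [[0, 1], [0] * 77]
torch.asarray(ragged)  # ValueError: expected sequence of length 2 at dim 1 (got 77)
```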
**To Reproduce**
Steps to reproduce the behavior:
See above
**Expected behavior**
No errors, and an image is created.
**Desktop (please complete the following information):**
- OS: Windows 11
- Browser: Chrome
- Commit revision: a05c824384f21dfd729e521e8d6cef8da3250bfc
|
modules/sd_hijack.py
<|code_start|>import math
import os
import sys
import traceback
import torch
import numpy as np
from torch import einsum
from torch.nn.functional import silu
import modules.textual_inversion.textual_inversion
from modules import prompt_parser, devices, sd_hijack_optimizations, shared
from modules.shared import opts, device, cmd_opts
from modules.sd_hijack_optimizations import invokeAI_mps_available
import ldm.modules.attention
import ldm.modules.diffusionmodules.model
attention_CrossAttention_forward = ldm.modules.attention.CrossAttention.forward
diffusionmodules_model_nonlinearity = ldm.modules.diffusionmodules.model.nonlinearity
diffusionmodules_model_AttnBlock_forward = ldm.modules.diffusionmodules.model.AttnBlock.forward
def apply_optimizations():
undo_optimizations()
ldm.modules.diffusionmodules.model.nonlinearity = silu
if cmd_opts.force_enable_xformers or (cmd_opts.xformers and shared.xformers_available and torch.version.cuda and (6, 0) <= torch.cuda.get_device_capability(shared.device) <= (8, 6)):
print("Applying xformers cross attention optimization.")
ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.xformers_attention_forward
ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.xformers_attnblock_forward
elif cmd_opts.opt_split_attention_v1:
print("Applying v1 cross attention optimization.")
ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward_v1
elif not cmd_opts.disable_opt_split_attention and (cmd_opts.opt_split_attention_invokeai or not torch.cuda.is_available()):
if not invokeAI_mps_available and shared.device.type == 'mps':
print("The InvokeAI cross attention optimization for MPS requires the psutil package which is not installed.")
print("Applying v1 cross attention optimization.")
ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward_v1
else:
print("Applying cross attention optimization (InvokeAI).")
ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward_invokeAI
elif not cmd_opts.disable_opt_split_attention and (cmd_opts.opt_split_attention or torch.cuda.is_available()):
print("Applying cross attention optimization (Doggettx).")
ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward
ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.cross_attention_attnblock_forward
def undo_optimizations():
from modules.hypernetworks import hypernetwork
ldm.modules.attention.CrossAttention.forward = hypernetwork.attention_CrossAttention_forward
ldm.modules.diffusionmodules.model.nonlinearity = diffusionmodules_model_nonlinearity
ldm.modules.diffusionmodules.model.AttnBlock.forward = diffusionmodules_model_AttnBlock_forward
def get_target_prompt_token_count(token_count):
return math.ceil(max(token_count, 1) / 75) * 75
class StableDiffusionModelHijack:
fixes = None
comments = []
layers = None
circular_enabled = False
clip = None
embedding_db = modules.textual_inversion.textual_inversion.EmbeddingDatabase(cmd_opts.embeddings_dir)
def hijack(self, m):
model_embeddings = m.cond_stage_model.transformer.text_model.embeddings
model_embeddings.token_embedding = EmbeddingsWithFixes(model_embeddings.token_embedding, self)
m.cond_stage_model = FrozenCLIPEmbedderWithCustomWords(m.cond_stage_model, self)
self.clip = m.cond_stage_model
apply_optimizations()
def flatten(el):
flattened = [flatten(children) for children in el.children()]
res = [el]
for c in flattened:
res += c
return res
self.layers = flatten(m)
def undo_hijack(self, m):
if type(m.cond_stage_model) == FrozenCLIPEmbedderWithCustomWords:
m.cond_stage_model = m.cond_stage_model.wrapped
model_embeddings = m.cond_stage_model.transformer.text_model.embeddings
if type(model_embeddings.token_embedding) == EmbeddingsWithFixes:
model_embeddings.token_embedding = model_embeddings.token_embedding.wrapped
def apply_circular(self, enable):
if self.circular_enabled == enable:
return
self.circular_enabled = enable
for layer in [layer for layer in self.layers if type(layer) == torch.nn.Conv2d]:
layer.padding_mode = 'circular' if enable else 'zeros'
def clear_comments(self):
self.comments = []
def tokenize(self, text):
_, remade_batch_tokens, _, _, _, token_count = self.clip.process_text([text])
return remade_batch_tokens[0], token_count, get_target_prompt_token_count(token_count)
class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module):
def __init__(self, wrapped, hijack):
super().__init__()
self.wrapped = wrapped
self.hijack: StableDiffusionModelHijack = hijack
self.tokenizer = wrapped.tokenizer
self.token_mults = {}
self.comma_token = [v for k, v in self.tokenizer.get_vocab().items() if k == ',</w>'][0]
tokens_with_parens = [(k, v) for k, v in self.tokenizer.get_vocab().items() if '(' in k or ')' in k or '[' in k or ']' in k]
for text, ident in tokens_with_parens:
mult = 1.0
for c in text:
if c == '[':
mult /= 1.1
if c == ']':
mult *= 1.1
if c == '(':
mult *= 1.1
if c == ')':
mult /= 1.1
if mult != 1.0:
self.token_mults[ident] = mult
def tokenize_line(self, line, used_custom_terms, hijack_comments):
id_end = self.wrapped.tokenizer.eos_token_id
if opts.enable_emphasis:
parsed = prompt_parser.parse_prompt_attention(line)
else:
parsed = [[line, 1.0]]
tokenized = self.wrapped.tokenizer([text for text, _ in parsed], truncation=False, add_special_tokens=False)["input_ids"]
fixes = []
remade_tokens = []
multipliers = []
last_comma = -1
for tokens, (text, weight) in zip(tokenized, parsed):
i = 0
while i < len(tokens):
token = tokens[i]
embedding, embedding_length_in_tokens = self.hijack.embedding_db.find_embedding_at_position(tokens, i)
if token == self.comma_token:
last_comma = len(remade_tokens)
elif opts.comma_padding_backtrack != 0 and max(len(remade_tokens), 1) % 75 == 0 and last_comma != -1 and len(remade_tokens) - last_comma <= opts.comma_padding_backtrack:
last_comma += 1
reloc_tokens = remade_tokens[last_comma:]
reloc_mults = multipliers[last_comma:]
remade_tokens = remade_tokens[:last_comma]
length = len(remade_tokens)
rem = int(math.ceil(length / 75)) * 75 - length
remade_tokens += [id_end] * rem + reloc_tokens
multipliers = multipliers[:last_comma] + [1.0] * rem + reloc_mults
if embedding is None:
remade_tokens.append(token)
multipliers.append(weight)
i += 1
else:
emb_len = int(embedding.vec.shape[0])
iteration = len(remade_tokens) // 75
if (len(remade_tokens) + emb_len) // 75 != iteration:
rem = (75 * (iteration + 1) - len(remade_tokens))
remade_tokens += [id_end] * rem
multipliers += [1.0] * rem
iteration += 1
fixes.append((iteration, (len(remade_tokens) % 75, embedding)))
remade_tokens += [0] * emb_len
multipliers += [weight] * emb_len
used_custom_terms.append((embedding.name, embedding.checksum()))
i += embedding_length_in_tokens
token_count = len(remade_tokens)
prompt_target_length = get_target_prompt_token_count(token_count)
tokens_to_add = prompt_target_length - len(remade_tokens)
remade_tokens = remade_tokens + [id_end] * tokens_to_add
multipliers = multipliers + [1.0] * tokens_to_add
return remade_tokens, fixes, multipliers, token_count
def process_text(self, texts):
used_custom_terms = []
remade_batch_tokens = []
hijack_comments = []
hijack_fixes = []
token_count = 0
cache = {}
batch_multipliers = []
for line in texts:
if line in cache:
remade_tokens, fixes, multipliers = cache[line]
else:
remade_tokens, fixes, multipliers, current_token_count = self.tokenize_line(line, used_custom_terms, hijack_comments)
token_count = max(current_token_count, token_count)
cache[line] = (remade_tokens, fixes, multipliers)
remade_batch_tokens.append(remade_tokens)
hijack_fixes.append(fixes)
batch_multipliers.append(multipliers)
return batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count
def process_text_old(self, text):
id_start = self.wrapped.tokenizer.bos_token_id
id_end = self.wrapped.tokenizer.eos_token_id
maxlen = self.wrapped.max_length # you get to stay at 77
used_custom_terms = []
remade_batch_tokens = []
overflowing_words = []
hijack_comments = []
hijack_fixes = []
token_count = 0
cache = {}
batch_tokens = self.wrapped.tokenizer(text, truncation=False, add_special_tokens=False)["input_ids"]
batch_multipliers = []
for tokens in batch_tokens:
tuple_tokens = tuple(tokens)
if tuple_tokens in cache:
remade_tokens, fixes, multipliers = cache[tuple_tokens]
else:
fixes = []
remade_tokens = []
multipliers = []
mult = 1.0
i = 0
while i < len(tokens):
token = tokens[i]
embedding, embedding_length_in_tokens = self.hijack.embedding_db.find_embedding_at_position(tokens, i)
mult_change = self.token_mults.get(token) if opts.enable_emphasis else None
if mult_change is not None:
mult *= mult_change
i += 1
elif embedding is None:
remade_tokens.append(token)
multipliers.append(mult)
i += 1
else:
emb_len = int(embedding.vec.shape[0])
fixes.append((len(remade_tokens), embedding))
remade_tokens += [0] * emb_len
multipliers += [mult] * emb_len
used_custom_terms.append((embedding.name, embedding.checksum()))
i += embedding_length_in_tokens
if len(remade_tokens) > maxlen - 2:
vocab = {v: k for k, v in self.wrapped.tokenizer.get_vocab().items()}
ovf = remade_tokens[maxlen - 2:]
overflowing_words = [vocab.get(int(x), "") for x in ovf]
overflowing_text = self.wrapped.tokenizer.convert_tokens_to_string(''.join(overflowing_words))
hijack_comments.append(f"Warning: too many input tokens; some ({len(overflowing_words)}) have been truncated:\n{overflowing_text}\n")
token_count = len(remade_tokens)
remade_tokens = remade_tokens + [id_end] * (maxlen - 2 - len(remade_tokens))
remade_tokens = [id_start] + remade_tokens[0:maxlen-2] + [id_end]
cache[tuple_tokens] = (remade_tokens, fixes, multipliers)
multipliers = multipliers + [1.0] * (maxlen - 2 - len(multipliers))
multipliers = [1.0] + multipliers[0:maxlen - 2] + [1.0]
remade_batch_tokens.append(remade_tokens)
hijack_fixes.append(fixes)
batch_multipliers.append(multipliers)
return batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count
def forward(self, text):
use_old = opts.use_old_emphasis_implementation
if use_old:
batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count = self.process_text_old(text)
else:
batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count = self.process_text(text)
self.hijack.comments += hijack_comments
if len(used_custom_terms) > 0:
self.hijack.comments.append("Used embeddings: " + ", ".join([f'{word} [{checksum}]' for word, checksum in used_custom_terms]))
if use_old:
self.hijack.fixes = hijack_fixes
return self.process_tokens(remade_batch_tokens, batch_multipliers)
z = None
i = 0
while max(map(len, remade_batch_tokens)) != 0:
rem_tokens = [x[75:] for x in remade_batch_tokens]
rem_multipliers = [x[75:] for x in batch_multipliers]
self.hijack.fixes = []
for unfiltered in hijack_fixes:
fixes = []
for fix in unfiltered:
if fix[0] == i:
fixes.append(fix[1])
self.hijack.fixes.append(fixes)
z1 = self.process_tokens([x[:75] for x in remade_batch_tokens], [x[:75] for x in batch_multipliers])
z = z1 if z is None else torch.cat((z, z1), axis=-2)
remade_batch_tokens = rem_tokens
batch_multipliers = rem_multipliers
i += 1
return z
def process_tokens(self, remade_batch_tokens, batch_multipliers):
if not opts.use_old_emphasis_implementation:
remade_batch_tokens = [[self.wrapped.tokenizer.bos_token_id] + x[:75] + [self.wrapped.tokenizer.eos_token_id] for x in remade_batch_tokens]
batch_multipliers = [[1.0] + x[:75] + [1.0] for x in batch_multipliers]
tokens = torch.asarray(remade_batch_tokens).to(device)
outputs = self.wrapped.transformer(input_ids=tokens, output_hidden_states=-opts.CLIP_stop_at_last_layers)
if opts.CLIP_stop_at_last_layers > 1:
z = outputs.hidden_states[-opts.CLIP_stop_at_last_layers]
z = self.wrapped.transformer.text_model.final_layer_norm(z)
else:
z = outputs.last_hidden_state
# restoring original mean is likely not correct, but it seems to work well to prevent artifacts that happen otherwise
batch_multipliers_of_same_length = [x + [1.0] * (75 - len(x)) for x in batch_multipliers]
batch_multipliers = torch.asarray(batch_multipliers_of_same_length).to(device)
original_mean = z.mean()
z *= batch_multipliers.reshape(batch_multipliers.shape + (1,)).expand(z.shape)
new_mean = z.mean()
z *= original_mean / new_mean
return z
class EmbeddingsWithFixes(torch.nn.Module):
def __init__(self, wrapped, embeddings):
super().__init__()
self.wrapped = wrapped
self.embeddings = embeddings
def forward(self, input_ids):
batch_fixes = self.embeddings.fixes
self.embeddings.fixes = None
inputs_embeds = self.wrapped(input_ids)
if batch_fixes is None or len(batch_fixes) == 0 or max([len(x) for x in batch_fixes]) == 0:
return inputs_embeds
vecs = []
for fixes, tensor in zip(batch_fixes, inputs_embeds):
for offset, embedding in fixes:
emb = embedding.vec
emb_len = min(tensor.shape[0]-offset-1, emb.shape[0])
tensor = torch.cat([tensor[0:offset+1], emb[0:emb_len], tensor[offset+1+emb_len:]])
vecs.append(tensor)
return torch.stack(vecs)
def add_circular_option_to_conv_2d():
conv2d_constructor = torch.nn.Conv2d.__init__
def conv2d_constructor_circular(self, *args, **kwargs):
return conv2d_constructor(self, *args, padding_mode='circular', **kwargs)
torch.nn.Conv2d.__init__ = conv2d_constructor_circular
model_hijack = StableDiffusionModelHijack()
<|code_end|>
|
modules/sd_hijack.py
<|code_start|>import math
import os
import sys
import traceback
import torch
import numpy as np
from torch import einsum
from torch.nn.functional import silu
import modules.textual_inversion.textual_inversion
from modules import prompt_parser, devices, sd_hijack_optimizations, shared
from modules.shared import opts, device, cmd_opts
from modules.sd_hijack_optimizations import invokeAI_mps_available
import ldm.modules.attention
import ldm.modules.diffusionmodules.model
attention_CrossAttention_forward = ldm.modules.attention.CrossAttention.forward
diffusionmodules_model_nonlinearity = ldm.modules.diffusionmodules.model.nonlinearity
diffusionmodules_model_AttnBlock_forward = ldm.modules.diffusionmodules.model.AttnBlock.forward
def apply_optimizations():
undo_optimizations()
ldm.modules.diffusionmodules.model.nonlinearity = silu
if cmd_opts.force_enable_xformers or (cmd_opts.xformers and shared.xformers_available and torch.version.cuda and (6, 0) <= torch.cuda.get_device_capability(shared.device) <= (8, 6)):
print("Applying xformers cross attention optimization.")
ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.xformers_attention_forward
ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.xformers_attnblock_forward
elif cmd_opts.opt_split_attention_v1:
print("Applying v1 cross attention optimization.")
ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward_v1
elif not cmd_opts.disable_opt_split_attention and (cmd_opts.opt_split_attention_invokeai or not torch.cuda.is_available()):
if not invokeAI_mps_available and shared.device.type == 'mps':
print("The InvokeAI cross attention optimization for MPS requires the psutil package which is not installed.")
print("Applying v1 cross attention optimization.")
ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward_v1
else:
print("Applying cross attention optimization (InvokeAI).")
ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward_invokeAI
elif not cmd_opts.disable_opt_split_attention and (cmd_opts.opt_split_attention or torch.cuda.is_available()):
print("Applying cross attention optimization (Doggettx).")
ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward
ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.cross_attention_attnblock_forward
def undo_optimizations():
from modules.hypernetworks import hypernetwork
ldm.modules.attention.CrossAttention.forward = hypernetwork.attention_CrossAttention_forward
ldm.modules.diffusionmodules.model.nonlinearity = diffusionmodules_model_nonlinearity
ldm.modules.diffusionmodules.model.AttnBlock.forward = diffusionmodules_model_AttnBlock_forward
def get_target_prompt_token_count(token_count):
return math.ceil(max(token_count, 1) / 75) * 75
class StableDiffusionModelHijack:
fixes = None
comments = []
layers = None
circular_enabled = False
clip = None
embedding_db = modules.textual_inversion.textual_inversion.EmbeddingDatabase(cmd_opts.embeddings_dir)
def hijack(self, m):
model_embeddings = m.cond_stage_model.transformer.text_model.embeddings
model_embeddings.token_embedding = EmbeddingsWithFixes(model_embeddings.token_embedding, self)
m.cond_stage_model = FrozenCLIPEmbedderWithCustomWords(m.cond_stage_model, self)
self.clip = m.cond_stage_model
apply_optimizations()
def flatten(el):
flattened = [flatten(children) for children in el.children()]
res = [el]
for c in flattened:
res += c
return res
self.layers = flatten(m)
def undo_hijack(self, m):
if type(m.cond_stage_model) == FrozenCLIPEmbedderWithCustomWords:
m.cond_stage_model = m.cond_stage_model.wrapped
model_embeddings = m.cond_stage_model.transformer.text_model.embeddings
if type(model_embeddings.token_embedding) == EmbeddingsWithFixes:
model_embeddings.token_embedding = model_embeddings.token_embedding.wrapped
def apply_circular(self, enable):
if self.circular_enabled == enable:
return
self.circular_enabled = enable
for layer in [layer for layer in self.layers if type(layer) == torch.nn.Conv2d]:
layer.padding_mode = 'circular' if enable else 'zeros'
def clear_comments(self):
self.comments = []
def tokenize(self, text):
_, remade_batch_tokens, _, _, _, token_count = self.clip.process_text([text])
return remade_batch_tokens[0], token_count, get_target_prompt_token_count(token_count)
class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module):
def __init__(self, wrapped, hijack):
super().__init__()
self.wrapped = wrapped
self.hijack: StableDiffusionModelHijack = hijack
self.tokenizer = wrapped.tokenizer
self.token_mults = {}
self.comma_token = [v for k, v in self.tokenizer.get_vocab().items() if k == ',</w>'][0]
tokens_with_parens = [(k, v) for k, v in self.tokenizer.get_vocab().items() if '(' in k or ')' in k or '[' in k or ']' in k]
for text, ident in tokens_with_parens:
mult = 1.0
for c in text:
if c == '[':
mult /= 1.1
if c == ']':
mult *= 1.1
if c == '(':
mult *= 1.1
if c == ')':
mult /= 1.1
if mult != 1.0:
self.token_mults[ident] = mult
def tokenize_line(self, line, used_custom_terms, hijack_comments):
id_end = self.wrapped.tokenizer.eos_token_id
if opts.enable_emphasis:
parsed = prompt_parser.parse_prompt_attention(line)
else:
parsed = [[line, 1.0]]
tokenized = self.wrapped.tokenizer([text for text, _ in parsed], truncation=False, add_special_tokens=False)["input_ids"]
fixes = []
remade_tokens = []
multipliers = []
last_comma = -1
for tokens, (text, weight) in zip(tokenized, parsed):
i = 0
while i < len(tokens):
token = tokens[i]
embedding, embedding_length_in_tokens = self.hijack.embedding_db.find_embedding_at_position(tokens, i)
if token == self.comma_token:
last_comma = len(remade_tokens)
elif opts.comma_padding_backtrack != 0 and max(len(remade_tokens), 1) % 75 == 0 and last_comma != -1 and len(remade_tokens) - last_comma <= opts.comma_padding_backtrack:
last_comma += 1
reloc_tokens = remade_tokens[last_comma:]
reloc_mults = multipliers[last_comma:]
remade_tokens = remade_tokens[:last_comma]
length = len(remade_tokens)
rem = int(math.ceil(length / 75)) * 75 - length
remade_tokens += [id_end] * rem + reloc_tokens
multipliers = multipliers[:last_comma] + [1.0] * rem + reloc_mults
if embedding is None:
remade_tokens.append(token)
multipliers.append(weight)
i += 1
else:
emb_len = int(embedding.vec.shape[0])
iteration = len(remade_tokens) // 75
if (len(remade_tokens) + emb_len) // 75 != iteration:
rem = (75 * (iteration + 1) - len(remade_tokens))
remade_tokens += [id_end] * rem
multipliers += [1.0] * rem
iteration += 1
fixes.append((iteration, (len(remade_tokens) % 75, embedding)))
remade_tokens += [0] * emb_len
multipliers += [weight] * emb_len
used_custom_terms.append((embedding.name, embedding.checksum()))
i += embedding_length_in_tokens
token_count = len(remade_tokens)
prompt_target_length = get_target_prompt_token_count(token_count)
tokens_to_add = prompt_target_length - len(remade_tokens)
remade_tokens = remade_tokens + [id_end] * tokens_to_add
multipliers = multipliers + [1.0] * tokens_to_add
return remade_tokens, fixes, multipliers, token_count
def process_text(self, texts):
used_custom_terms = []
remade_batch_tokens = []
hijack_comments = []
hijack_fixes = []
token_count = 0
cache = {}
batch_multipliers = []
for line in texts:
if line in cache:
remade_tokens, fixes, multipliers = cache[line]
else:
remade_tokens, fixes, multipliers, current_token_count = self.tokenize_line(line, used_custom_terms, hijack_comments)
token_count = max(current_token_count, token_count)
cache[line] = (remade_tokens, fixes, multipliers)
remade_batch_tokens.append(remade_tokens)
hijack_fixes.append(fixes)
batch_multipliers.append(multipliers)
return batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count
def process_text_old(self, text):
id_start = self.wrapped.tokenizer.bos_token_id
id_end = self.wrapped.tokenizer.eos_token_id
maxlen = self.wrapped.max_length # you get to stay at 77
used_custom_terms = []
remade_batch_tokens = []
overflowing_words = []
hijack_comments = []
hijack_fixes = []
token_count = 0
cache = {}
batch_tokens = self.wrapped.tokenizer(text, truncation=False, add_special_tokens=False)["input_ids"]
batch_multipliers = []
for tokens in batch_tokens:
tuple_tokens = tuple(tokens)
if tuple_tokens in cache:
remade_tokens, fixes, multipliers = cache[tuple_tokens]
else:
fixes = []
remade_tokens = []
multipliers = []
mult = 1.0
i = 0
while i < len(tokens):
token = tokens[i]
embedding, embedding_length_in_tokens = self.hijack.embedding_db.find_embedding_at_position(tokens, i)
mult_change = self.token_mults.get(token) if opts.enable_emphasis else None
if mult_change is not None:
mult *= mult_change
i += 1
elif embedding is None:
remade_tokens.append(token)
multipliers.append(mult)
i += 1
else:
emb_len = int(embedding.vec.shape[0])
fixes.append((len(remade_tokens), embedding))
remade_tokens += [0] * emb_len
multipliers += [mult] * emb_len
used_custom_terms.append((embedding.name, embedding.checksum()))
i += embedding_length_in_tokens
if len(remade_tokens) > maxlen - 2:
vocab = {v: k for k, v in self.wrapped.tokenizer.get_vocab().items()}
ovf = remade_tokens[maxlen - 2:]
overflowing_words = [vocab.get(int(x), "") for x in ovf]
overflowing_text = self.wrapped.tokenizer.convert_tokens_to_string(''.join(overflowing_words))
hijack_comments.append(f"Warning: too many input tokens; some ({len(overflowing_words)}) have been truncated:\n{overflowing_text}\n")
token_count = len(remade_tokens)
remade_tokens = remade_tokens + [id_end] * (maxlen - 2 - len(remade_tokens))
remade_tokens = [id_start] + remade_tokens[0:maxlen-2] + [id_end]
cache[tuple_tokens] = (remade_tokens, fixes, multipliers)
multipliers = multipliers + [1.0] * (maxlen - 2 - len(multipliers))
multipliers = [1.0] + multipliers[0:maxlen - 2] + [1.0]
remade_batch_tokens.append(remade_tokens)
hijack_fixes.append(fixes)
batch_multipliers.append(multipliers)
return batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count
def forward(self, text):
use_old = opts.use_old_emphasis_implementation
if use_old:
batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count = self.process_text_old(text)
else:
batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count = self.process_text(text)
self.hijack.comments += hijack_comments
if len(used_custom_terms) > 0:
self.hijack.comments.append("Used embeddings: " + ", ".join([f'{word} [{checksum}]' for word, checksum in used_custom_terms]))
if use_old:
self.hijack.fixes = hijack_fixes
return self.process_tokens(remade_batch_tokens, batch_multipliers)
z = None
i = 0
while max(map(len, remade_batch_tokens)) != 0:
rem_tokens = [x[75:] for x in remade_batch_tokens]
rem_multipliers = [x[75:] for x in batch_multipliers]
self.hijack.fixes = []
for unfiltered in hijack_fixes:
fixes = []
for fix in unfiltered:
if fix[0] == i:
fixes.append(fix[1])
self.hijack.fixes.append(fixes)
tokens = []
multipliers = []
for i in range(len(remade_batch_tokens)):
if len(remade_batch_tokens[i]) > 0:
tokens.append(remade_batch_tokens[i][:75])
multipliers.append(batch_multipliers[i][:75])
else:
tokens.append([self.wrapped.tokenizer.eos_token_id] * 75)
multipliers.append([1.0] * 75)
z1 = self.process_tokens(tokens, multipliers)
z = z1 if z is None else torch.cat((z, z1), axis=-2)
remade_batch_tokens = rem_tokens
batch_multipliers = rem_multipliers
i += 1
return z
def process_tokens(self, remade_batch_tokens, batch_multipliers):
if not opts.use_old_emphasis_implementation:
remade_batch_tokens = [[self.wrapped.tokenizer.bos_token_id] + x[:75] + [self.wrapped.tokenizer.eos_token_id] for x in remade_batch_tokens]
batch_multipliers = [[1.0] + x[:75] + [1.0] for x in batch_multipliers]
tokens = torch.asarray(remade_batch_tokens).to(device)
outputs = self.wrapped.transformer(input_ids=tokens, output_hidden_states=-opts.CLIP_stop_at_last_layers)
if opts.CLIP_stop_at_last_layers > 1:
z = outputs.hidden_states[-opts.CLIP_stop_at_last_layers]
z = self.wrapped.transformer.text_model.final_layer_norm(z)
else:
z = outputs.last_hidden_state
# restoring original mean is likely not correct, but it seems to work well to prevent artifacts that happen otherwise
batch_multipliers_of_same_length = [x + [1.0] * (75 - len(x)) for x in batch_multipliers]
batch_multipliers = torch.asarray(batch_multipliers_of_same_length).to(device)
original_mean = z.mean()
z *= batch_multipliers.reshape(batch_multipliers.shape + (1,)).expand(z.shape)
new_mean = z.mean()
z *= original_mean / new_mean
return z
class EmbeddingsWithFixes(torch.nn.Module):
def __init__(self, wrapped, embeddings):
super().__init__()
self.wrapped = wrapped
self.embeddings = embeddings
def forward(self, input_ids):
batch_fixes = self.embeddings.fixes
self.embeddings.fixes = None
inputs_embeds = self.wrapped(input_ids)
if batch_fixes is None or len(batch_fixes) == 0 or max([len(x) for x in batch_fixes]) == 0:
return inputs_embeds
vecs = []
for fixes, tensor in zip(batch_fixes, inputs_embeds):
for offset, embedding in fixes:
emb = embedding.vec
emb_len = min(tensor.shape[0]-offset-1, emb.shape[0])
tensor = torch.cat([tensor[0:offset+1], emb[0:emb_len], tensor[offset+1+emb_len:]])
vecs.append(tensor)
return torch.stack(vecs)
def add_circular_option_to_conv_2d():
conv2d_constructor = torch.nn.Conv2d.__init__
def conv2d_constructor_circular(self, *args, **kwargs):
return conv2d_constructor(self, *args, padding_mode='circular', **kwargs)
torch.nn.Conv2d.__init__ = conv2d_constructor_circular
model_hijack = StableDiffusionModelHijack()
<|code_end|>
|
Save feature fails until output directories created manually
**Describe the bug**
Save feature of txt2img panel does not work until output directories are created manually
**To Reproduce**
Steps to reproduce the behavior:
1. Check out a clean copy of the project, add the model file, and launch it via `webui.bat`; wait for downloads to complete
2. Load the app in the browser
3. Focus the `txt2img` tab (it should already be focused by default)
4. Enter any prompt and click the `Generate` button
5. Wait for the images to complete
6. Click the `Save` button below the generated images
**Expected behavior**
1. Images should be saved to `log/images`
2. CSV should be created/updated with info about the images
**Actual behavior**
1. Save info panel displays a red `Error` indicator
2. In the server output, save fails due to the output directory not existing
Log Output:
```bash
Traceback (most recent call last):
File "<PATH>\modules\ui.py", line 176, in f
res = list(func(*args, **kwargs))
File "<PATH>\modules\ui.py", line 128, in save_files
with open(os.path.join(opts.outdir_save, "log.csv"), "a", encoding="utf8", newline='') as file:
FileNotFoundError: [Errno 2] No such file or directory: 'log/images\\log.csv'
Traceback (most recent call last):
File "<PATH>\venv\lib\site-packages\gradio\routes.py", line 273, in run_predict
output = await app.blocks.process_api(
File "<PATH>\venv\lib\site-packages\gradio\blocks.py", line 746, in process_api
predictions = self.postprocess_data(fn_index, result["prediction"], state)
File "<PATH>\venv\lib\site-packages\gradio\blocks.py", line 700, in postprocess_data
if predictions[i] is components._Keywords.FINISHED_ITERATING:
IndexError: tuple index out of range
```
**Desktop (please complete the following information):**
- OS: Windows
- Browser: Chrome
- Commit revision: 42bf5fa3256bff5e4640e5a626e750d4e49e01e1
**Additional context**
After manually creating the `log` and `images` folders, save works as expected
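A likely fix (my guess, not the actual patch) is to create the save directory before opening the CSV in `save_files`, e.g.:
```python
import os

# Make sure the configured save directory exists before appending to the log.
os.makedirs(opts.outdir_save, exist_ok=True)
with open(os.path.join(opts.outdir_save, "log.csv"), "a", encoding="utf8", newline='') as file:
    ...
```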
|
modules/ui.py
<|code_start|>import base64
import html
import io
import json
import math
import mimetypes
import os
import random
import sys
import time
import traceback
import platform
import subprocess as sp
from functools import reduce
import numpy as np
import torch
from PIL import Image, PngImagePlugin
import piexif
import gradio as gr
import gradio.utils
import gradio.routes
from modules import sd_hijack
from modules.paths import script_path
from modules.shared import opts, cmd_opts
if cmd_opts.deepdanbooru:
from modules.deepbooru import get_deepbooru_tags
import modules.shared as shared
from modules.sd_samplers import samplers, samplers_for_img2img
from modules.sd_hijack import model_hijack
import modules.ldsr_model
import modules.scripts
import modules.gfpgan_model
import modules.codeformer_model
import modules.styles
import modules.generation_parameters_copypaste
from modules import prompt_parser
from modules.images import save_image
import modules.textual_inversion.ui
import modules.hypernetworks.ui
# this is a fix for Windows users. Without it, javascript files will be served with text/html content-type and the browser will not show any UI
mimetypes.init()
mimetypes.add_type('application/javascript', '.js')
if not cmd_opts.share and not cmd_opts.listen:
# fix gradio phoning home
gradio.utils.version_check = lambda: None
gradio.utils.get_local_ip_address = lambda: '127.0.0.1'
if cmd_opts.ngrok != None:
import modules.ngrok as ngrok
print('ngrok authtoken detected, trying to connect...')
ngrok.connect(cmd_opts.ngrok, cmd_opts.port if cmd_opts.port != None else 7860)
def gr_show(visible=True):
return {"visible": visible, "__type__": "update"}
sample_img2img = "assets/stable-samples/img2img/sketch-mountains-input.jpg"
sample_img2img = sample_img2img if os.path.exists(sample_img2img) else None
css_hide_progressbar = """
.wrap .m-12 svg { display:none!important; }
.wrap .m-12::before { content:"Loading..." }
.progress-bar { display:none!important; }
.meta-text { display:none!important; }
"""
# Using constants for these since the variation selector isn't visible.
# Important that they exactly match script.js for tooltip to work.
random_symbol = '\U0001f3b2\ufe0f' # 🎲️
reuse_symbol = '\u267b\ufe0f' # ♻️
art_symbol = '\U0001f3a8' # 🎨
paste_symbol = '\u2199\ufe0f' # ↙
folder_symbol = '\U0001f4c2' # 📂
def plaintext_to_html(text):
text = "<p>" + "<br>\n".join([f"{html.escape(x)}" for x in text.split('\n')]) + "</p>"
return text
def image_from_url_text(filedata):
if type(filedata) == list:
if len(filedata) == 0:
return None
filedata = filedata[0]
if filedata.startswith("data:image/png;base64,"):
filedata = filedata[len("data:image/png;base64,"):]
filedata = base64.decodebytes(filedata.encode('utf-8'))
image = Image.open(io.BytesIO(filedata))
return image
def send_gradio_gallery_to_image(x):
if len(x) == 0:
return None
return image_from_url_text(x[0])
def save_files(js_data, images, do_make_zip, index):
import csv
filenames = []
fullfns = []
# quick dictionary to class object conversion; it's necessary because apply_filename_pattern requires it
class MyObject:
def __init__(self, d=None):
if d is not None:
for key, value in d.items():
setattr(self, key, value)
data = json.loads(js_data)
p = MyObject(data)
path = opts.outdir_save
save_to_dirs = opts.use_save_to_dirs_for_ui
extension: str = opts.samples_format
start_index = 0
if index > -1 and opts.save_selected_only and (index >= data["index_of_first_image"]): # ensures we are looking at a specific non-grid picture, and we have save_selected_only
images = [images[index]]
start_index = index
with open(os.path.join(opts.outdir_save, "log.csv"), "a", encoding="utf8", newline='') as file:
at_start = file.tell() == 0
writer = csv.writer(file)
if at_start:
writer.writerow(["prompt", "seed", "width", "height", "sampler", "cfgs", "steps", "filename", "negative_prompt"])
for image_index, filedata in enumerate(images, start_index):
if filedata.startswith("data:image/png;base64,"):
filedata = filedata[len("data:image/png;base64,"):]
image = Image.open(io.BytesIO(base64.decodebytes(filedata.encode('utf-8'))))
is_grid = image_index < p.index_of_first_image
i = 0 if is_grid else (image_index - p.index_of_first_image)
fullfn, txt_fullfn = save_image(image, path, "", seed=p.all_seeds[i], prompt=p.all_prompts[i], extension=extension, info=p.infotexts[image_index], grid=is_grid, p=p, save_to_dirs=save_to_dirs)
filename = os.path.relpath(fullfn, path)
filenames.append(filename)
fullfns.append(fullfn)
if txt_fullfn:
filenames.append(os.path.basename(txt_fullfn))
fullfns.append(txt_fullfn)
writer.writerow([data["prompt"], data["seed"], data["width"], data["height"], data["sampler"], data["cfg_scale"], data["steps"], filenames[0], data["negative_prompt"]])
# Make Zip
if do_make_zip:
zip_filepath = os.path.join(path, "images.zip")
from zipfile import ZipFile
with ZipFile(zip_filepath, "w") as zip_file:
for i in range(len(fullfns)):
with open(fullfns[i], mode="rb") as f:
zip_file.writestr(filenames[i], f.read())
fullfns.insert(0, zip_filepath)
return gr.File.update(value=fullfns, visible=True), '', '', plaintext_to_html(f"Saved: {filenames[0]}")
def wrap_gradio_call(func, extra_outputs=None):
def f(*args, extra_outputs_array=extra_outputs, **kwargs):
run_memmon = opts.memmon_poll_rate > 0 and not shared.mem_mon.disabled
if run_memmon:
shared.mem_mon.monitor()
t = time.perf_counter()
try:
res = list(func(*args, **kwargs))
except Exception as e:
print("Error completing request", file=sys.stderr)
print("Arguments:", args, kwargs, file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
shared.state.job = ""
shared.state.job_count = 0
if extra_outputs_array is None:
extra_outputs_array = [None, '']
res = extra_outputs_array + [f"<div class='error'>{plaintext_to_html(type(e).__name__+': '+str(e))}</div>"]
elapsed = time.perf_counter() - t
elapsed_m = int(elapsed // 60)
elapsed_s = elapsed % 60
elapsed_text = f"{elapsed_s:.2f}s"
if (elapsed_m > 0):
elapsed_text = f"{elapsed_m}m "+elapsed_text
if run_memmon:
mem_stats = {k: -(v//-(1024*1024)) for k, v in shared.mem_mon.stop().items()}
active_peak = mem_stats['active_peak']
reserved_peak = mem_stats['reserved_peak']
sys_peak = mem_stats['system_peak']
sys_total = mem_stats['total']
sys_pct = round(sys_peak/max(sys_total, 1) * 100, 2)
vram_html = f"<p class='vram'>Torch active/reserved: {active_peak}/{reserved_peak} MiB, <wbr>Sys VRAM: {sys_peak}/{sys_total} MiB ({sys_pct}%)</p>"
else:
vram_html = ''
# last item is always HTML
res[-1] += f"<div class='performance'><p class='time'>Time taken: <wbr>{elapsed_text}</p>{vram_html}</div>"
shared.state.skipped = False
shared.state.interrupted = False
shared.state.job_count = 0
return tuple(res)
return f
def check_progress_call(id_part):
if shared.state.job_count == 0:
return "", gr_show(False), gr_show(False), gr_show(False)
progress = 0
if shared.state.job_count > 0:
progress += shared.state.job_no / shared.state.job_count
if shared.state.sampling_steps > 0:
progress += 1 / shared.state.job_count * shared.state.sampling_step / shared.state.sampling_steps
progress = min(progress, 1)
progressbar = ""
if opts.show_progressbar:
progressbar = f"""<div class='progressDiv'><div class='progress' style="width:{progress * 100}%">{str(int(progress*100))+"%" if progress > 0.01 else ""}</div></div>"""
image = gr_show(False)
preview_visibility = gr_show(False)
if opts.show_progress_every_n_steps > 0:
if shared.parallel_processing_allowed:
if shared.state.sampling_step - shared.state.current_image_sampling_step >= opts.show_progress_every_n_steps and shared.state.current_latent is not None:
shared.state.current_image = modules.sd_samplers.sample_to_image(shared.state.current_latent)
shared.state.current_image_sampling_step = shared.state.sampling_step
image = shared.state.current_image
if image is None:
image = gr.update(value=None)
else:
preview_visibility = gr_show(True)
if shared.state.textinfo is not None:
textinfo_result = gr.HTML.update(value=shared.state.textinfo, visible=True)
else:
textinfo_result = gr_show(False)
return f"<span id='{id_part}_progress_span' style='display: none'>{time.time()}</span><p>{progressbar}</p>", preview_visibility, image, textinfo_result
def check_progress_call_initial(id_part):
shared.state.job_count = -1
shared.state.current_latent = None
shared.state.current_image = None
shared.state.textinfo = None
return check_progress_call(id_part)
def roll_artist(prompt):
allowed_cats = set([x for x in shared.artist_db.categories() if len(opts.random_artist_categories)==0 or x in opts.random_artist_categories])
artist = random.choice([x for x in shared.artist_db.artists if x.category in allowed_cats])
return prompt + ", " + artist.name if prompt != '' else artist.name
def visit(x, func, path=""):
if hasattr(x, 'children'):
for c in x.children:
visit(c, func, path)
elif x.label is not None:
func(path + "/" + str(x.label), x)
def add_style(name: str, prompt: str, negative_prompt: str):
if name is None:
return [gr_show(), gr_show()]
style = modules.styles.PromptStyle(name, prompt, negative_prompt)
shared.prompt_styles.styles[style.name] = style
# Save all loaded prompt styles: this allows us to update the storage format in the future more easily, because we
# reserialize all styles every time we save them
shared.prompt_styles.save_styles(shared.styles_filename)
return [gr.Dropdown.update(visible=True, choices=list(shared.prompt_styles.styles)) for _ in range(4)]
def apply_styles(prompt, prompt_neg, style1_name, style2_name):
prompt = shared.prompt_styles.apply_styles_to_prompt(prompt, [style1_name, style2_name])
prompt_neg = shared.prompt_styles.apply_negative_styles_to_prompt(prompt_neg, [style1_name, style2_name])
return [gr.Textbox.update(value=prompt), gr.Textbox.update(value=prompt_neg), gr.Dropdown.update(value="None"), gr.Dropdown.update(value="None")]
def interrogate(image):
prompt = shared.interrogator.interrogate(image)
return gr_show(True) if prompt is None else prompt
def interrogate_deepbooru(image):
prompt = get_deepbooru_tags(image, opts.interrogate_deepbooru_score_threshold)
return gr_show(True) if prompt is None else prompt
def create_seed_inputs():
with gr.Row():
with gr.Box():
with gr.Row(elem_id='seed_row'):
seed = (gr.Textbox if cmd_opts.use_textbox_seed else gr.Number)(label='Seed', value=-1)
seed.style(container=False)
random_seed = gr.Button(random_symbol, elem_id='random_seed')
reuse_seed = gr.Button(reuse_symbol, elem_id='reuse_seed')
with gr.Box(elem_id='subseed_show_box'):
seed_checkbox = gr.Checkbox(label='Extra', elem_id='subseed_show', value=False)
# Components to show/hide based on the 'Extra' checkbox
seed_extras = []
with gr.Row(visible=False) as seed_extra_row_1:
seed_extras.append(seed_extra_row_1)
with gr.Box():
with gr.Row(elem_id='subseed_row'):
subseed = gr.Number(label='Variation seed', value=-1)
subseed.style(container=False)
random_subseed = gr.Button(random_symbol, elem_id='random_subseed')
reuse_subseed = gr.Button(reuse_symbol, elem_id='reuse_subseed')
subseed_strength = gr.Slider(label='Variation strength', value=0.0, minimum=0, maximum=1, step=0.01)
with gr.Row(visible=False) as seed_extra_row_2:
seed_extras.append(seed_extra_row_2)
seed_resize_from_w = gr.Slider(minimum=0, maximum=2048, step=64, label="Resize seed from width", value=0)
seed_resize_from_h = gr.Slider(minimum=0, maximum=2048, step=64, label="Resize seed from height", value=0)
random_seed.click(fn=lambda: -1, show_progress=False, inputs=[], outputs=[seed])
random_subseed.click(fn=lambda: -1, show_progress=False, inputs=[], outputs=[subseed])
def change_visibility(show):
return {comp: gr_show(show) for comp in seed_extras}
seed_checkbox.change(change_visibility, show_progress=False, inputs=[seed_checkbox], outputs=seed_extras)
return seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox
def connect_reuse_seed(seed: gr.Number, reuse_seed: gr.Button, generation_info: gr.Textbox, dummy_component, is_subseed):
""" Connects a 'reuse (sub)seed' button's click event so that it copies last used
(sub)seed value from generation info the to the seed field. If copying subseed and subseed strength
was 0, i.e. no variation seed was used, it copies the normal seed value instead."""
def copy_seed(gen_info_string: str, index):
res = -1
try:
gen_info = json.loads(gen_info_string)
index -= gen_info.get('index_of_first_image', 0)
if is_subseed and gen_info.get('subseed_strength', 0) > 0:
all_subseeds = gen_info.get('all_subseeds', [-1])
res = all_subseeds[index if 0 <= index < len(all_subseeds) else 0]
else:
all_seeds = gen_info.get('all_seeds', [-1])
res = all_seeds[index if 0 <= index < len(all_seeds) else 0]
except json.decoder.JSONDecodeError as e:
if gen_info_string != '':
print("Error parsing JSON generation info:", file=sys.stderr)
print(gen_info_string, file=sys.stderr)
return [res, gr_show(False)]
reuse_seed.click(
fn=copy_seed,
_js="(x, y) => [x, selected_gallery_index()]",
show_progress=False,
inputs=[generation_info, dummy_component],
outputs=[seed, dummy_component]
)
def update_token_counter(text, steps):
try:
_, prompt_flat_list, _ = prompt_parser.get_multicond_prompt_list([text])
prompt_schedules = prompt_parser.get_learned_conditioning_prompt_schedules(prompt_flat_list, steps)
except Exception:
# a parsing error can happen here during typing, and we don't want to bother the user with
# messages related to it in console
prompt_schedules = [[[steps, text]]]
flat_prompts = reduce(lambda list1, list2: list1+list2, prompt_schedules)
prompts = [prompt_text for step, prompt_text in flat_prompts]
tokens, token_count, max_length = max([model_hijack.tokenize(prompt) for prompt in prompts], key=lambda args: args[1])
style_class = ' class="red"' if (token_count > max_length) else ""
return f"<span {style_class}>{token_count}/{max_length}</span>"
def create_toprow(is_img2img):
id_part = "img2img" if is_img2img else "txt2img"
with gr.Row(elem_id="toprow"):
with gr.Column(scale=4):
with gr.Row():
with gr.Column(scale=80):
with gr.Row():
prompt = gr.Textbox(label="Prompt", elem_id=f"{id_part}_prompt", show_label=False, placeholder="Prompt", lines=2)
with gr.Column(scale=1, elem_id="roll_col"):
roll = gr.Button(value=art_symbol, elem_id="roll", visible=len(shared.artist_db.artists) > 0)
paste = gr.Button(value=paste_symbol, elem_id="paste")
token_counter = gr.HTML(value="<span></span>", elem_id=f"{id_part}_token_counter")
token_button = gr.Button(visible=False, elem_id=f"{id_part}_token_button")
with gr.Column(scale=10, elem_id="style_pos_col"):
prompt_style = gr.Dropdown(label="Style 1", elem_id=f"{id_part}_style_index", choices=[k for k, v in shared.prompt_styles.styles.items()], value=next(iter(shared.prompt_styles.styles.keys())), visible=len(shared.prompt_styles.styles) > 1)
with gr.Row():
with gr.Column(scale=8):
with gr.Row():
negative_prompt = gr.Textbox(label="Negative prompt", elem_id="negative_prompt", show_label=False, placeholder="Negative prompt", lines=2)
with gr.Column(scale=1, elem_id="roll_col"):
sh = gr.Button(elem_id="sh", visible=True)
with gr.Column(scale=1, elem_id="style_neg_col"):
prompt_style2 = gr.Dropdown(label="Style 2", elem_id=f"{id_part}_style2_index", choices=[k for k, v in shared.prompt_styles.styles.items()], value=next(iter(shared.prompt_styles.styles.keys())), visible=len(shared.prompt_styles.styles) > 1)
with gr.Column(scale=1):
with gr.Row():
skip = gr.Button('Skip', elem_id=f"{id_part}_skip")
interrupt = gr.Button('Interrupt', elem_id=f"{id_part}_interrupt")
submit = gr.Button('Generate', elem_id=f"{id_part}_generate", variant='primary')
skip.click(
fn=lambda: shared.state.skip(),
inputs=[],
outputs=[],
)
interrupt.click(
fn=lambda: shared.state.interrupt(),
inputs=[],
outputs=[],
)
with gr.Row(scale=1):
if is_img2img:
interrogate = gr.Button('Interrogate\nCLIP', elem_id="interrogate")
if cmd_opts.deepdanbooru:
deepbooru = gr.Button('Interrogate\nDeepBooru', elem_id="deepbooru")
else:
deepbooru = None
else:
interrogate = None
deepbooru = None
prompt_style_apply = gr.Button('Apply style', elem_id="style_apply")
save_style = gr.Button('Create style', elem_id="style_create")
return prompt, roll, prompt_style, negative_prompt, prompt_style2, submit, interrogate, deepbooru, prompt_style_apply, save_style, paste, token_counter, token_button
def setup_progressbar(progressbar, preview, id_part, textinfo=None):
if textinfo is None:
textinfo = gr.HTML(visible=False)
check_progress = gr.Button('Check progress', elem_id=f"{id_part}_check_progress", visible=False)
check_progress.click(
fn=lambda: check_progress_call(id_part),
show_progress=False,
inputs=[],
outputs=[progressbar, preview, preview, textinfo],
)
check_progress_initial = gr.Button('Check progress (first)', elem_id=f"{id_part}_check_progress_initial", visible=False)
check_progress_initial.click(
fn=lambda: check_progress_call_initial(id_part),
show_progress=False,
inputs=[],
outputs=[progressbar, preview, preview, textinfo],
)
def create_ui(wrap_gradio_gpu_call):
import modules.img2img
import modules.txt2img
with gr.Blocks(analytics_enabled=False) as txt2img_interface:
txt2img_prompt, roll, txt2img_prompt_style, txt2img_negative_prompt, txt2img_prompt_style2, submit, _, _, txt2img_prompt_style_apply, txt2img_save_style, paste, token_counter, token_button = create_toprow(is_img2img=False)
dummy_component = gr.Label(visible=False)
with gr.Row(elem_id='txt2img_progress_row'):
with gr.Column(scale=1):
pass
with gr.Column(scale=1):
progressbar = gr.HTML(elem_id="txt2img_progressbar")
txt2img_preview = gr.Image(elem_id='txt2img_preview', visible=False)
setup_progressbar(progressbar, txt2img_preview, 'txt2img')
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
steps = gr.Slider(minimum=1, maximum=150, step=1, label="Sampling Steps", value=20)
sampler_index = gr.Radio(label='Sampling method', elem_id="txt2img_sampling", choices=[x.name for x in samplers], value=samplers[0].name, type="index")
with gr.Group():
width = gr.Slider(minimum=64, maximum=2048, step=64, label="Width", value=512)
height = gr.Slider(minimum=64, maximum=2048, step=64, label="Height", value=512)
with gr.Row():
restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1)
tiling = gr.Checkbox(label='Tiling', value=False)
enable_hr = gr.Checkbox(label='Highres. fix', value=False)
with gr.Row(visible=False) as hr_options:
scale_latent = gr.Checkbox(label='Scale latent', value=False)
denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7)
with gr.Row():
batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1)
batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1)
cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0)
seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs()
with gr.Group():
custom_inputs = modules.scripts.scripts_txt2img.setup_ui(is_img2img=False)
with gr.Column(variant='panel'):
with gr.Group():
txt2img_preview = gr.Image(elem_id='txt2img_preview', visible=False)
txt2img_gallery = gr.Gallery(label='Output', show_label=False, elem_id='txt2img_gallery').style(grid=4)
with gr.Group():
with gr.Row():
save = gr.Button('Save')
send_to_img2img = gr.Button('Send to img2img')
send_to_inpaint = gr.Button('Send to inpaint')
send_to_extras = gr.Button('Send to extras')
button_id = "hidden_element" if shared.cmd_opts.hide_ui_dir_config else 'open_folder'
open_txt2img_folder = gr.Button(folder_symbol, elem_id=button_id)
with gr.Row():
do_make_zip = gr.Checkbox(label="Make Zip when Save?", value=False)
with gr.Row():
download_files = gr.File(None, file_count="multiple", interactive=False, show_label=False, visible=False)
with gr.Group():
html_info = gr.HTML()
generation_info = gr.Textbox(visible=False)
connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False)
connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True)
txt2img_args = dict(
fn=wrap_gradio_gpu_call(modules.txt2img.txt2img),
_js="submit",
inputs=[
txt2img_prompt,
txt2img_negative_prompt,
txt2img_prompt_style,
txt2img_prompt_style2,
steps,
sampler_index,
restore_faces,
tiling,
batch_count,
batch_size,
cfg_scale,
seed,
subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox,
height,
width,
enable_hr,
scale_latent,
denoising_strength,
] + custom_inputs,
outputs=[
txt2img_gallery,
generation_info,
html_info
],
show_progress=False,
)
txt2img_prompt.submit(**txt2img_args)
submit.click(**txt2img_args)
enable_hr.change(
fn=lambda x: gr_show(x),
inputs=[enable_hr],
outputs=[hr_options],
)
save.click(
fn=wrap_gradio_call(save_files),
_js="(x, y, z, w) => [x, y, z, selected_gallery_index()]",
inputs=[
generation_info,
txt2img_gallery,
do_make_zip,
html_info,
],
outputs=[
download_files,
html_info,
html_info,
html_info,
]
)
roll.click(
fn=roll_artist,
_js="update_txt2img_tokens",
inputs=[
txt2img_prompt,
],
outputs=[
txt2img_prompt,
]
)
txt2img_paste_fields = [
(txt2img_prompt, "Prompt"),
(txt2img_negative_prompt, "Negative prompt"),
(steps, "Steps"),
(sampler_index, "Sampler"),
(restore_faces, "Face restoration"),
(cfg_scale, "CFG scale"),
(seed, "Seed"),
(width, "Size-1"),
(height, "Size-2"),
(batch_size, "Batch size"),
(subseed, "Variation seed"),
(subseed_strength, "Variation seed strength"),
(seed_resize_from_w, "Seed resize from-1"),
(seed_resize_from_h, "Seed resize from-2"),
(denoising_strength, "Denoising strength"),
(enable_hr, lambda d: "Denoising strength" in d),
(hr_options, lambda d: gr.Row.update(visible="Denoising strength" in d)),
]
modules.generation_parameters_copypaste.connect_paste(paste, txt2img_paste_fields, txt2img_prompt)
token_button.click(fn=update_token_counter, inputs=[txt2img_prompt, steps], outputs=[token_counter])
with gr.Blocks(analytics_enabled=False) as img2img_interface:
img2img_prompt, roll, img2img_prompt_style, img2img_negative_prompt, img2img_prompt_style2, submit, img2img_interrogate, img2img_deepbooru, img2img_prompt_style_apply, img2img_save_style, paste, token_counter, token_button = create_toprow(is_img2img=True)
with gr.Row(elem_id='img2img_progress_row'):
with gr.Column(scale=1):
pass
with gr.Column(scale=1):
progressbar = gr.HTML(elem_id="img2img_progressbar")
img2img_preview = gr.Image(elem_id='img2img_preview', visible=False)
setup_progressbar(progressbar, img2img_preview, 'img2img')
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
with gr.Tabs(elem_id="mode_img2img") as tabs_img2img_mode:
with gr.TabItem('img2img', id='img2img'):
init_img = gr.Image(label="Image for img2img", elem_id="img2img_image", show_label=False, source="upload", interactive=True, type="pil", tool=cmd_opts.gradio_img2img_tool)
with gr.TabItem('Inpaint', id='inpaint'):
init_img_with_mask = gr.Image(label="Image for inpainting with mask", show_label=False, elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool="sketch", image_mode="RGBA")
init_img_inpaint = gr.Image(label="Image for img2img", show_label=False, source="upload", interactive=True, type="pil", visible=False, elem_id="img_inpaint_base")
init_mask_inpaint = gr.Image(label="Mask", source="upload", interactive=True, type="pil", visible=False, elem_id="img_inpaint_mask")
mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4)
with gr.Row():
mask_mode = gr.Radio(label="Mask mode", show_label=False, choices=["Draw mask", "Upload mask"], type="index", value="Draw mask", elem_id="mask_mode")
inpainting_mask_invert = gr.Radio(label='Masking mode', show_label=False, choices=['Inpaint masked', 'Inpaint not masked'], value='Inpaint masked', type="index")
inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='original', type="index")
with gr.Row():
inpaint_full_res = gr.Checkbox(label='Inpaint at full resolution', value=False)
inpaint_full_res_padding = gr.Slider(label='Inpaint at full resolution padding, pixels', minimum=0, maximum=256, step=4, value=32)
with gr.TabItem('Batch img2img', id='batch'):
hidden = '<br>Disabled when launched with --hide-ui-dir-config.' if shared.cmd_opts.hide_ui_dir_config else ''
gr.HTML(f"<p class=\"text-gray-500\">Process images in a directory on the same machine where the server is running.<br>Use an empty output directory to save pictures normally instead of writing to the output directory.{hidden}</p>")
img2img_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs)
img2img_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs)
with gr.Row():
resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", show_label=False, choices=["Just resize", "Crop and resize", "Resize and fill"], type="index", value="Just resize")
steps = gr.Slider(minimum=1, maximum=150, step=1, label="Sampling Steps", value=20)
sampler_index = gr.Radio(label='Sampling method', choices=[x.name for x in samplers_for_img2img], value=samplers_for_img2img[0].name, type="index")
with gr.Group():
width = gr.Slider(minimum=64, maximum=2048, step=64, label="Width", value=512)
height = gr.Slider(minimum=64, maximum=2048, step=64, label="Height", value=512)
with gr.Row():
restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1)
tiling = gr.Checkbox(label='Tiling', value=False)
with gr.Row():
batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1)
batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1)
with gr.Group():
cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0)
denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75)
seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs()
with gr.Group():
custom_inputs = modules.scripts.scripts_img2img.setup_ui(is_img2img=True)
with gr.Column(variant='panel'):
with gr.Group():
img2img_preview = gr.Image(elem_id='img2img_preview', visible=False)
img2img_gallery = gr.Gallery(label='Output', show_label=False, elem_id='img2img_gallery').style(grid=4)
with gr.Group():
with gr.Row():
save = gr.Button('Save')
img2img_send_to_img2img = gr.Button('Send to img2img')
img2img_send_to_inpaint = gr.Button('Send to inpaint')
img2img_send_to_extras = gr.Button('Send to extras')
button_id = "hidden_element" if shared.cmd_opts.hide_ui_dir_config else 'open_folder'
open_img2img_folder = gr.Button(folder_symbol, elem_id=button_id)
with gr.Row():
do_make_zip = gr.Checkbox(label="Make Zip when Save?", value=False)
with gr.Row():
download_files = gr.File(None, file_count="multiple", interactive=False, show_label=False, visible=False)
with gr.Group():
html_info = gr.HTML()
generation_info = gr.Textbox(visible=False)
connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False)
connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True)
mask_mode.change(
lambda mode, img: {
init_img_with_mask: gr_show(mode == 0),
init_img_inpaint: gr_show(mode == 1),
init_mask_inpaint: gr_show(mode == 1),
},
inputs=[mask_mode, init_img_with_mask],
outputs=[
init_img_with_mask,
init_img_inpaint,
init_mask_inpaint,
],
)
img2img_args = dict(
fn=wrap_gradio_gpu_call(modules.img2img.img2img),
_js="submit_img2img",
inputs=[
dummy_component,
img2img_prompt,
img2img_negative_prompt,
img2img_prompt_style,
img2img_prompt_style2,
init_img,
init_img_with_mask,
init_img_inpaint,
init_mask_inpaint,
mask_mode,
steps,
sampler_index,
mask_blur,
inpainting_fill,
restore_faces,
tiling,
batch_count,
batch_size,
cfg_scale,
denoising_strength,
seed,
subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox,
height,
width,
resize_mode,
inpaint_full_res,
inpaint_full_res_padding,
inpainting_mask_invert,
img2img_batch_input_dir,
img2img_batch_output_dir,
] + custom_inputs,
outputs=[
img2img_gallery,
generation_info,
html_info
],
show_progress=False,
)
img2img_prompt.submit(**img2img_args)
submit.click(**img2img_args)
img2img_interrogate.click(
fn=interrogate,
inputs=[init_img],
outputs=[img2img_prompt],
)
if cmd_opts.deepdanbooru:
img2img_deepbooru.click(
fn=interrogate_deepbooru,
inputs=[init_img],
outputs=[img2img_prompt],
)
save.click(
fn=wrap_gradio_call(save_files),
_js="(x, y, z, w) => [x, y, z, selected_gallery_index()]",
inputs=[
generation_info,
img2img_gallery,
do_make_zip,
html_info,
],
outputs=[
download_files,
html_info,
html_info,
html_info,
]
)
roll.click(
fn=roll_artist,
_js="update_img2img_tokens",
inputs=[
img2img_prompt,
],
outputs=[
img2img_prompt,
]
)
prompts = [(txt2img_prompt, txt2img_negative_prompt), (img2img_prompt, img2img_negative_prompt)]
style_dropdowns = [(txt2img_prompt_style, txt2img_prompt_style2), (img2img_prompt_style, img2img_prompt_style2)]
style_js_funcs = ["update_txt2img_tokens", "update_img2img_tokens"]
for button, (prompt, negative_prompt) in zip([txt2img_save_style, img2img_save_style], prompts):
button.click(
fn=add_style,
_js="ask_for_style_name",
# Have to pass empty dummy component here, because the JavaScript and Python function have to accept
# the same number of parameters, but we only know the style-name after the JavaScript prompt
inputs=[dummy_component, prompt, negative_prompt],
outputs=[txt2img_prompt_style, img2img_prompt_style, txt2img_prompt_style2, img2img_prompt_style2],
)
for button, (prompt, negative_prompt), (style1, style2), js_func in zip([txt2img_prompt_style_apply, img2img_prompt_style_apply], prompts, style_dropdowns, style_js_funcs):
button.click(
fn=apply_styles,
_js=js_func,
inputs=[prompt, negative_prompt, style1, style2],
outputs=[prompt, negative_prompt, style1, style2],
)
img2img_paste_fields = [
(img2img_prompt, "Prompt"),
(img2img_negative_prompt, "Negative prompt"),
(steps, "Steps"),
(sampler_index, "Sampler"),
(restore_faces, "Face restoration"),
(cfg_scale, "CFG scale"),
(seed, "Seed"),
(width, "Size-1"),
(height, "Size-2"),
(batch_size, "Batch size"),
(subseed, "Variation seed"),
(subseed_strength, "Variation seed strength"),
(seed_resize_from_w, "Seed resize from-1"),
(seed_resize_from_h, "Seed resize from-2"),
(denoising_strength, "Denoising strength"),
]
modules.generation_parameters_copypaste.connect_paste(paste, img2img_paste_fields, img2img_prompt)
token_button.click(fn=update_token_counter, inputs=[img2img_prompt, steps], outputs=[token_counter])
with gr.Blocks(analytics_enabled=False) as extras_interface:
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
with gr.Tabs(elem_id="mode_extras"):
with gr.TabItem('Single Image'):
extras_image = gr.Image(label="Source", source="upload", interactive=True, type="pil")
with gr.TabItem('Batch Process'):
image_batch = gr.File(label="Batch Process", file_count="multiple", interactive=True, type="file")
upscaling_resize = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Resize", value=2)
with gr.Group():
extras_upscaler_1 = gr.Radio(label='Upscaler 1', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index")
with gr.Group():
extras_upscaler_2 = gr.Radio(label='Upscaler 2', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index")
extras_upscaler_2_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="Upscaler 2 visibility", value=1)
with gr.Group():
gfpgan_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="GFPGAN visibility", value=0, interactive=modules.gfpgan_model.have_gfpgan)
with gr.Group():
codeformer_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="CodeFormer visibility", value=0, interactive=modules.codeformer_model.have_codeformer)
codeformer_weight = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="CodeFormer weight (0 = maximum effect, 1 = minimum effect)", value=0, interactive=modules.codeformer_model.have_codeformer)
submit = gr.Button('Generate', elem_id="extras_generate", variant='primary')
with gr.Column(variant='panel'):
result_images = gr.Gallery(label="Result", show_label=False)
html_info_x = gr.HTML()
html_info = gr.HTML()
extras_send_to_img2img = gr.Button('Send to img2img')
extras_send_to_inpaint = gr.Button('Send to inpaint')
button_id = "hidden_element" if shared.cmd_opts.hide_ui_dir_config else ''
open_extras_folder = gr.Button('Open output directory', elem_id=button_id)
submit.click(
fn=wrap_gradio_gpu_call(modules.extras.run_extras),
_js="get_extras_tab_index",
inputs=[
dummy_component,
extras_image,
image_batch,
gfpgan_visibility,
codeformer_visibility,
codeformer_weight,
upscaling_resize,
extras_upscaler_1,
extras_upscaler_2,
extras_upscaler_2_visibility,
],
outputs=[
result_images,
html_info_x,
html_info,
]
)
extras_send_to_img2img.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_img2img",
inputs=[result_images],
outputs=[init_img],
)
extras_send_to_inpaint.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_inpaint",
inputs=[result_images],
outputs=[init_img_with_mask],
)
with gr.Blocks(analytics_enabled=False) as pnginfo_interface:
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
image = gr.Image(elem_id="pnginfo_image", label="Source", source="upload", interactive=True, type="pil")
with gr.Column(variant='panel'):
html = gr.HTML()
generation_info = gr.Textbox(visible=False)
html2 = gr.HTML()
with gr.Row():
pnginfo_send_to_txt2img = gr.Button('Send to txt2img')
pnginfo_send_to_img2img = gr.Button('Send to img2img')
image.change(
fn=wrap_gradio_call(modules.extras.run_pnginfo),
inputs=[image],
outputs=[html, generation_info, html2],
)
with gr.Blocks() as modelmerger_interface:
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
gr.HTML(value="<p>A merger of the two checkpoints will be generated in your <b>checkpoint</b> directory.</p>")
with gr.Row():
primary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_primary_model_name", label="Primary Model Name")
secondary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_secondary_model_name", label="Secondary Model Name")
custom_name = gr.Textbox(label="Custom Name (Optional)")
interp_amount = gr.Slider(minimum=0.0, maximum=1.0, step=0.05, label='Interpolation Amount', value=0.3)
interp_method = gr.Radio(choices=["Weighted Sum", "Sigmoid", "Inverse Sigmoid"], value="Weighted Sum", label="Interpolation Method")
save_as_half = gr.Checkbox(value=False, label="Save as float16")
modelmerger_merge = gr.Button(elem_id="modelmerger_merge", label="Merge", variant='primary')
with gr.Column(variant='panel'):
submit_result = gr.Textbox(elem_id="modelmerger_result", show_label=False)
sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings()
with gr.Blocks() as textual_inversion_interface:
with gr.Row().style(equal_height=False):
with gr.Column():
with gr.Group():
gr.HTML(value="<p style='margin-bottom: 0.7em'>See <b><a href=\"https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Textual-Inversion\">wiki</a></b> for detailed explanation.</p>")
gr.HTML(value="<p style='margin-bottom: 0.7em'>Create a new embedding</p>")
new_embedding_name = gr.Textbox(label="Name")
initialization_text = gr.Textbox(label="Initialization text", value="*")
nvpt = gr.Slider(label="Number of vectors per token", minimum=1, maximum=75, step=1, value=1)
with gr.Row():
with gr.Column(scale=3):
gr.HTML(value="")
with gr.Column():
create_embedding = gr.Button(value="Create embedding", variant='primary')
with gr.Group():
gr.HTML(value="<p style='margin-bottom: 0.7em'>Create a new hypernetwork</p>")
new_hypernetwork_name = gr.Textbox(label="Name")
new_hypernetwork_sizes = gr.CheckboxGroup(label="Modules", value=["768", "320", "640", "1280"], choices=["768", "320", "640", "1280"])
with gr.Row():
with gr.Column(scale=3):
gr.HTML(value="")
with gr.Column():
create_hypernetwork = gr.Button(value="Create hypernetwork", variant='primary')
with gr.Group():
gr.HTML(value="<p style='margin-bottom: 0.7em'>Preprocess images</p>")
process_src = gr.Textbox(label='Source directory')
process_dst = gr.Textbox(label='Destination directory')
process_width = gr.Slider(minimum=64, maximum=2048, step=64, label="Width", value=512)
process_height = gr.Slider(minimum=64, maximum=2048, step=64, label="Height", value=512)
with gr.Row():
process_flip = gr.Checkbox(label='Create flipped copies')
process_split = gr.Checkbox(label='Split oversized images into two')
process_caption = gr.Checkbox(label='Use BLIP caption as filename')
with gr.Row():
with gr.Column(scale=3):
gr.HTML(value="")
with gr.Column():
run_preprocess = gr.Button(value="Preprocess", variant='primary')
with gr.Group():
gr.HTML(value="<p style='margin-bottom: 0.7em'>Train an embedding; must specify a directory with a set of 1:1 ratio images</p>")
train_embedding_name = gr.Dropdown(label='Embedding', choices=sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys()))
train_hypernetwork_name = gr.Dropdown(label='Hypernetwork', choices=[x for x in shared.hypernetworks.keys()])
learn_rate = gr.Textbox(label='Learning rate', placeholder="Learning rate", value="0.005")
dataset_directory = gr.Textbox(label='Dataset directory', placeholder="Path to directory with input images")
log_directory = gr.Textbox(label='Log directory', placeholder="Path to directory where to write outputs", value="textual_inversion")
template_file = gr.Textbox(label='Prompt template file', value=os.path.join(script_path, "textual_inversion_templates", "style_filewords.txt"))
training_width = gr.Slider(minimum=64, maximum=2048, step=64, label="Width", value=512)
training_height = gr.Slider(minimum=64, maximum=2048, step=64, label="Height", value=512)
steps = gr.Number(label='Max steps', value=100000, precision=0)
num_repeats = gr.Number(label='Number of repeats for a single input image per epoch', value=100, precision=0)
create_image_every = gr.Number(label='Save an image to log directory every N steps, 0 to disable', value=500, precision=0)
save_embedding_every = gr.Number(label='Save a copy of embedding to log directory every N steps, 0 to disable', value=500, precision=0)
preview_image_prompt = gr.Textbox(label='Preview prompt', value="")
with gr.Row():
interrupt_training = gr.Button(value="Interrupt")
train_hypernetwork = gr.Button(value="Train Hypernetwork", variant='primary')
train_embedding = gr.Button(value="Train Embedding", variant='primary')
with gr.Column():
progressbar = gr.HTML(elem_id="ti_progressbar")
ti_output = gr.Text(elem_id="ti_output", value="", show_label=False)
ti_gallery = gr.Gallery(label='Output', show_label=False, elem_id='ti_gallery').style(grid=4)
ti_preview = gr.Image(elem_id='ti_preview', visible=False)
ti_progress = gr.HTML(elem_id="ti_progress", value="")
ti_outcome = gr.HTML(elem_id="ti_error", value="")
setup_progressbar(progressbar, ti_preview, 'ti', textinfo=ti_progress)
create_embedding.click(
fn=modules.textual_inversion.ui.create_embedding,
inputs=[
new_embedding_name,
initialization_text,
nvpt,
],
outputs=[
train_embedding_name,
ti_output,
ti_outcome,
]
)
create_hypernetwork.click(
fn=modules.hypernetworks.ui.create_hypernetwork,
inputs=[
new_hypernetwork_name,
new_hypernetwork_sizes,
],
outputs=[
train_hypernetwork_name,
ti_output,
ti_outcome,
]
)
run_preprocess.click(
fn=wrap_gradio_gpu_call(modules.textual_inversion.ui.preprocess, extra_outputs=[gr.update()]),
_js="start_training_textual_inversion",
inputs=[
process_src,
process_dst,
process_width,
process_height,
process_flip,
process_split,
process_caption,
],
outputs=[
ti_output,
ti_outcome,
],
)
train_embedding.click(
fn=wrap_gradio_gpu_call(modules.textual_inversion.ui.train_embedding, extra_outputs=[gr.update()]),
_js="start_training_textual_inversion",
inputs=[
train_embedding_name,
learn_rate,
dataset_directory,
log_directory,
training_width,
training_height,
steps,
num_repeats,
create_image_every,
save_embedding_every,
template_file,
preview_image_prompt,
],
outputs=[
ti_output,
ti_outcome,
]
)
train_hypernetwork.click(
fn=wrap_gradio_gpu_call(modules.hypernetworks.ui.train_hypernetwork, extra_outputs=[gr.update()]),
_js="start_training_textual_inversion",
inputs=[
train_hypernetwork_name,
learn_rate,
dataset_directory,
log_directory,
steps,
create_image_every,
save_embedding_every,
template_file,
preview_image_prompt,
],
outputs=[
ti_output,
ti_outcome,
]
)
interrupt_training.click(
fn=lambda: shared.state.interrupt(),
inputs=[],
outputs=[],
)
def create_setting_component(key):
def fun():
return opts.data[key] if key in opts.data else opts.data_labels[key].default
info = opts.data_labels[key]
t = type(info.default)
args = info.component_args() if callable(info.component_args) else info.component_args
if info.component is not None:
comp = info.component
elif t == str:
comp = gr.Textbox
elif t == int:
comp = gr.Number
elif t == bool:
comp = gr.Checkbox
else:
raise Exception(f'bad options item type: {str(t)} for key {key}')
return comp(label=info.label, value=fun, **(args or {}))
components = []
component_dict = {}
def open_folder(f):
if not os.path.isdir(f):
print(f"""
WARNING
An open_folder request was made with an argument that is not a folder.
This could be an error or a malicious attempt to run code on your computer.
Requested path was: {f}
""", file=sys.stderr)
return
if not shared.cmd_opts.hide_ui_dir_config:
path = os.path.normpath(f)
if platform.system() == "Windows":
os.startfile(path)
elif platform.system() == "Darwin":
sp.Popen(["open", path])
else:
sp.Popen(["xdg-open", path])
def run_settings(*args):
changed = 0
for key, value, comp in zip(opts.data_labels.keys(), args, components):
if comp != dummy_component and not opts.same_type(value, opts.data_labels[key].default):
return f"Bad value for setting {key}: {value}; expecting {type(opts.data_labels[key].default).__name__}", opts.dumpjson()
for key, value, comp in zip(opts.data_labels.keys(), args, components):
if comp == dummy_component:
continue
comp_args = opts.data_labels[key].component_args
if comp_args and isinstance(comp_args, dict) and comp_args.get('visible') is False:
continue
oldval = opts.data.get(key, None)
opts.data[key] = value
if oldval != value:
if opts.data_labels[key].onchange is not None:
opts.data_labels[key].onchange()
changed += 1
opts.save(shared.config_filename)
return f'{changed} settings changed.', opts.dumpjson()
def run_settings_single(value, key):
if not opts.same_type(value, opts.data_labels[key].default):
return gr.update(visible=True), opts.dumpjson()
oldval = opts.data.get(key, None)
opts.data[key] = value
if oldval != value:
if opts.data_labels[key].onchange is not None:
opts.data_labels[key].onchange()
opts.save(shared.config_filename)
return gr.update(value=value), opts.dumpjson()
with gr.Blocks(analytics_enabled=False) as settings_interface:
settings_submit = gr.Button(value="Apply settings", variant='primary')
result = gr.HTML()
settings_cols = 3
items_per_col = int(len(opts.data_labels) * 0.9 / settings_cols)
quicksettings_list = []
cols_displayed = 0
items_displayed = 0
previous_section = None
column = None
with gr.Row(elem_id="settings").style(equal_height=False):
for i, (k, item) in enumerate(opts.data_labels.items()):
if previous_section != item.section:
if cols_displayed < settings_cols and (items_displayed >= items_per_col or previous_section is None):
if column is not None:
column.__exit__()
column = gr.Column(variant='panel')
column.__enter__()
items_displayed = 0
cols_displayed += 1
previous_section = item.section
gr.HTML(elem_id="settings_header_text_{}".format(item.section[0]), value='<h1 class="gr-button-lg">{}</h1>'.format(item.section[1]))
if item.show_on_main_page:
quicksettings_list.append((i, k, item))
components.append(dummy_component)
else:
component = create_setting_component(k)
component_dict[k] = component
components.append(component)
items_displayed += 1
request_notifications = gr.Button(value='Request browser notifications', elem_id="request_notifications")
request_notifications.click(
fn=lambda: None,
inputs=[],
outputs=[],
_js='function(){}'
)
with gr.Row():
reload_script_bodies = gr.Button(value='Reload custom script bodies (No ui updates, No restart)', variant='secondary')
restart_gradio = gr.Button(value='Restart Gradio and Refresh components (Custom Scripts, ui.py, js and css only)', variant='primary')
def reload_scripts():
modules.scripts.reload_script_body_only()
reload_script_bodies.click(
fn=reload_scripts,
inputs=[],
outputs=[],
_js='function(){}'
)
def request_restart():
shared.state.interrupt()
settings_interface.gradio_ref.do_restart = True
restart_gradio.click(
fn=request_restart,
inputs=[],
outputs=[],
_js='function(){restart_reload()}'
)
if column is not None:
column.__exit__()
interfaces = [
(txt2img_interface, "txt2img", "txt2img"),
(img2img_interface, "img2img", "img2img"),
(extras_interface, "Extras", "extras"),
(pnginfo_interface, "PNG Info", "pnginfo"),
(modelmerger_interface, "Checkpoint Merger", "modelmerger"),
(textual_inversion_interface, "Textual inversion", "ti"),
(settings_interface, "Settings", "settings"),
]
with open(os.path.join(script_path, "style.css"), "r", encoding="utf8") as file:
css = file.read()
if os.path.exists(os.path.join(script_path, "user.css")):
with open(os.path.join(script_path, "user.css"), "r", encoding="utf8") as file:
usercss = file.read()
css += usercss
if not cmd_opts.no_progressbar_hiding:
css += css_hide_progressbar
with gr.Blocks(css=css, analytics_enabled=False, title="Stable Diffusion") as demo:
with gr.Row(elem_id="quicksettings"):
for i, k, item in quicksettings_list:
component = create_setting_component(k)
component_dict[k] = component
settings_interface.gradio_ref = demo
with gr.Tabs() as tabs:
for interface, label, ifid in interfaces:
with gr.TabItem(label, id=ifid, elem_id='tab_' + ifid):
interface.render()
if os.path.exists(os.path.join(script_path, "notification.mp3")):
audio_notification = gr.Audio(interactive=False, value=os.path.join(script_path, "notification.mp3"), elem_id="audio_notification", visible=False)
text_settings = gr.Textbox(elem_id="settings_json", value=lambda: opts.dumpjson(), visible=False)
settings_submit.click(
fn=run_settings,
inputs=components,
outputs=[result, text_settings],
)
for i, k, item in quicksettings_list:
component = component_dict[k]
component.change(
fn=lambda value, k=k: run_settings_single(value, key=k),
inputs=[component],
outputs=[component, text_settings],
)
def modelmerger(*args):
try:
results = modules.extras.run_modelmerger(*args)
except Exception as e:
print("Error loading/saving model file:", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
modules.sd_models.list_models() # to remove the potentially missing models from the list
return ["Error loading/saving model file. It doesn't exist or the name contains illegal characters"] + [gr.Dropdown.update(choices=modules.sd_models.checkpoint_tiles()) for _ in range(3)]
return results
modelmerger_merge.click(
fn=modelmerger,
inputs=[
primary_model_name,
secondary_model_name,
interp_method,
interp_amount,
save_as_half,
custom_name,
],
outputs=[
submit_result,
primary_model_name,
secondary_model_name,
component_dict['sd_model_checkpoint'],
]
)
paste_field_names = ['Prompt', 'Negative prompt', 'Steps', 'Face restoration', 'Seed', 'Size-1', 'Size-2']
txt2img_fields = [field for field,name in txt2img_paste_fields if name in paste_field_names]
img2img_fields = [field for field,name in img2img_paste_fields if name in paste_field_names]
send_to_img2img.click(
fn=lambda img, *args: (image_from_url_text(img),*args),
_js="(gallery, ...args) => [extract_image_from_gallery_img2img(gallery), ...args]",
inputs=[txt2img_gallery] + txt2img_fields,
outputs=[init_img] + img2img_fields,
)
send_to_inpaint.click(
fn=lambda x, *args: (image_from_url_text(x), *args),
_js="(gallery, ...args) => [extract_image_from_gallery_inpaint(gallery), ...args]",
inputs=[txt2img_gallery] + txt2img_fields,
outputs=[init_img_with_mask] + img2img_fields,
)
img2img_send_to_img2img.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_img2img",
inputs=[img2img_gallery],
outputs=[init_img],
)
img2img_send_to_inpaint.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_inpaint",
inputs=[img2img_gallery],
outputs=[init_img_with_mask],
)
send_to_extras.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_extras",
inputs=[txt2img_gallery],
outputs=[extras_image],
)
open_txt2img_folder.click(
fn=lambda: open_folder(opts.outdir_samples or opts.outdir_txt2img_samples),
inputs=[],
outputs=[],
)
open_img2img_folder.click(
fn=lambda: open_folder(opts.outdir_samples or opts.outdir_img2img_samples),
inputs=[],
outputs=[],
)
open_extras_folder.click(
fn=lambda: open_folder(opts.outdir_samples or opts.outdir_extras_samples),
inputs=[],
outputs=[],
)
img2img_send_to_extras.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_extras",
inputs=[img2img_gallery],
outputs=[extras_image],
)
modules.generation_parameters_copypaste.connect_paste(pnginfo_send_to_txt2img, txt2img_paste_fields, generation_info, 'switch_to_txt2img')
modules.generation_parameters_copypaste.connect_paste(pnginfo_send_to_img2img, img2img_paste_fields, generation_info, 'switch_to_img2img_img2img')
ui_config_file = cmd_opts.ui_config_file
ui_settings = {}
settings_count = len(ui_settings)
error_loading = False
try:
if os.path.exists(ui_config_file):
with open(ui_config_file, "r", encoding="utf8") as file:
ui_settings = json.load(file)
except Exception:
error_loading = True
print("Error loading settings:", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
def loadsave(path, x):
def apply_field(obj, field, condition=None):
key = path + "/" + field
if getattr(obj,'custom_script_source',None) is not None:
key = 'customscript/' + obj.custom_script_source + '/' + key
if getattr(obj, 'do_not_save_to_config', False):
return
saved_value = ui_settings.get(key, None)
if saved_value is None:
ui_settings[key] = getattr(obj, field)
elif condition is None or condition(saved_value):
setattr(obj, field, saved_value)
if type(x) in [gr.Slider, gr.Radio, gr.Checkbox, gr.Textbox, gr.Number] and x.visible:
apply_field(x, 'visible')
if type(x) == gr.Slider:
apply_field(x, 'value')
apply_field(x, 'minimum')
apply_field(x, 'maximum')
apply_field(x, 'step')
if type(x) == gr.Radio:
apply_field(x, 'value', lambda val: val in x.choices)
if type(x) == gr.Checkbox:
apply_field(x, 'value')
if type(x) == gr.Textbox:
apply_field(x, 'value')
if type(x) == gr.Number:
apply_field(x, 'value')
visit(txt2img_interface, loadsave, "txt2img")
visit(img2img_interface, loadsave, "img2img")
visit(extras_interface, loadsave, "extras")
if not error_loading and (not os.path.exists(ui_config_file) or settings_count != len(ui_settings)):
with open(ui_config_file, "w", encoding="utf8") as file:
json.dump(ui_settings, file, indent=4)
return demo
with open(os.path.join(script_path, "script.js"), "r", encoding="utf8") as jsfile:
javascript = f'<script>{jsfile.read()}</script>'
jsdir = os.path.join(script_path, "javascript")
for filename in sorted(os.listdir(jsdir)):
with open(os.path.join(jsdir, filename), "r", encoding="utf8") as jsfile:
javascript += f"\n<script>{jsfile.read()}</script>"
if 'gradio_routes_templates_response' not in globals():
def template_response(*args, **kwargs):
res = gradio_routes_templates_response(*args, **kwargs)
res.body = res.body.replace(b'</head>', f'{javascript}</head>'.encode("utf8"))
res.init_headers()
return res
gradio_routes_templates_response = gradio.routes.templates.TemplateResponse
gradio.routes.templates.TemplateResponse = template_response
<|code_end|>
|
modules/ui.py
<|code_start|>import base64
import html
import io
import json
import math
import mimetypes
import os
import random
import sys
import time
import traceback
import platform
import subprocess as sp
from functools import reduce
import numpy as np
import torch
from PIL import Image, PngImagePlugin
import piexif
import gradio as gr
import gradio.utils
import gradio.routes
from modules import sd_hijack
from modules.paths import script_path
from modules.shared import opts, cmd_opts
if cmd_opts.deepdanbooru:
from modules.deepbooru import get_deepbooru_tags
import modules.shared as shared
from modules.sd_samplers import samplers, samplers_for_img2img
from modules.sd_hijack import model_hijack
import modules.ldsr_model
import modules.scripts
import modules.gfpgan_model
import modules.codeformer_model
import modules.styles
import modules.generation_parameters_copypaste
from modules import prompt_parser
from modules.images import save_image
import modules.textual_inversion.ui
import modules.hypernetworks.ui
# this is a fix for Windows users. Without it, javascript files will be served with text/html content-type and the browser will not show any UI
mimetypes.init()
mimetypes.add_type('application/javascript', '.js')
if not cmd_opts.share and not cmd_opts.listen:
# fix gradio phoning home
gradio.utils.version_check = lambda: None
gradio.utils.get_local_ip_address = lambda: '127.0.0.1'
if cmd_opts.ngrok != None:
import modules.ngrok as ngrok
print('ngrok authtoken detected, trying to connect...')
ngrok.connect(cmd_opts.ngrok, cmd_opts.port if cmd_opts.port != None else 7860)
def gr_show(visible=True):
return {"visible": visible, "__type__": "update"}
sample_img2img = "assets/stable-samples/img2img/sketch-mountains-input.jpg"
sample_img2img = sample_img2img if os.path.exists(sample_img2img) else None
css_hide_progressbar = """
.wrap .m-12 svg { display:none!important; }
.wrap .m-12::before { content:"Loading..." }
.progress-bar { display:none!important; }
.meta-text { display:none!important; }
"""
# Using constants for these since the variation selector isn't visible.
# Important that they exactly match script.js for tooltip to work.
random_symbol = '\U0001f3b2\ufe0f' # 🎲️
reuse_symbol = '\u267b\ufe0f' # ♻️
art_symbol = '\U0001f3a8' # 🎨
paste_symbol = '\u2199\ufe0f' # ↙
folder_symbol = '\U0001f4c2' # 📂
def plaintext_to_html(text):
text = "<p>" + "<br>\n".join([f"{html.escape(x)}" for x in text.split('\n')]) + "</p>"
return text
def image_from_url_text(filedata):
if type(filedata) == list:
if len(filedata) == 0:
return None
filedata = filedata[0]
if filedata.startswith("data:image/png;base64,"):
filedata = filedata[len("data:image/png;base64,"):]
filedata = base64.decodebytes(filedata.encode('utf-8'))
image = Image.open(io.BytesIO(filedata))
return image
def send_gradio_gallery_to_image(x):
if len(x) == 0:
return None
return image_from_url_text(x[0])
def save_files(js_data, images, do_make_zip, index):
import csv
filenames = []
fullfns = []
# quick dictionary to class object conversion; it's necessary because apply_filename_pattern requires it
class MyObject:
def __init__(self, d=None):
if d is not None:
for key, value in d.items():
setattr(self, key, value)
data = json.loads(js_data)
p = MyObject(data)
path = opts.outdir_save
save_to_dirs = opts.use_save_to_dirs_for_ui
extension: str = opts.samples_format
start_index = 0
if index > -1 and opts.save_selected_only and (index >= data["index_of_first_image"]): # ensures we are looking at a specific non-grid picture, and we have save_selected_only
images = [images[index]]
start_index = index
os.makedirs(opts.outdir_save, exist_ok=True)
with open(os.path.join(opts.outdir_save, "log.csv"), "a", encoding="utf8", newline='') as file:
at_start = file.tell() == 0
writer = csv.writer(file)
if at_start:
writer.writerow(["prompt", "seed", "width", "height", "sampler", "cfgs", "steps", "filename", "negative_prompt"])
for image_index, filedata in enumerate(images, start_index):
if filedata.startswith("data:image/png;base64,"):
filedata = filedata[len("data:image/png;base64,"):]
image = Image.open(io.BytesIO(base64.decodebytes(filedata.encode('utf-8'))))
is_grid = image_index < p.index_of_first_image
i = 0 if is_grid else (image_index - p.index_of_first_image)
fullfn, txt_fullfn = save_image(image, path, "", seed=p.all_seeds[i], prompt=p.all_prompts[i], extension=extension, info=p.infotexts[image_index], grid=is_grid, p=p, save_to_dirs=save_to_dirs)
filename = os.path.relpath(fullfn, path)
filenames.append(filename)
fullfns.append(fullfn)
if txt_fullfn:
filenames.append(os.path.basename(txt_fullfn))
fullfns.append(txt_fullfn)
writer.writerow([data["prompt"], data["seed"], data["width"], data["height"], data["sampler"], data["cfg_scale"], data["steps"], filenames[0], data["negative_prompt"]])
# Make Zip
if do_make_zip:
zip_filepath = os.path.join(path, "images.zip")
from zipfile import ZipFile
with ZipFile(zip_filepath, "w") as zip_file:
for i in range(len(fullfns)):
with open(fullfns[i], mode="rb") as f:
zip_file.writestr(filenames[i], f.read())
fullfns.insert(0, zip_filepath)
return gr.File.update(value=fullfns, visible=True), '', '', plaintext_to_html(f"Saved: {filenames[0]}")
def wrap_gradio_call(func, extra_outputs=None):
def f(*args, extra_outputs_array=extra_outputs, **kwargs):
run_memmon = opts.memmon_poll_rate > 0 and not shared.mem_mon.disabled
if run_memmon:
shared.mem_mon.monitor()
t = time.perf_counter()
try:
res = list(func(*args, **kwargs))
except Exception as e:
print("Error completing request", file=sys.stderr)
print("Arguments:", args, kwargs, file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
shared.state.job = ""
shared.state.job_count = 0
if extra_outputs_array is None:
extra_outputs_array = [None, '']
res = extra_outputs_array + [f"<div class='error'>{plaintext_to_html(type(e).__name__+': '+str(e))}</div>"]
elapsed = time.perf_counter() - t
elapsed_m = int(elapsed // 60)
elapsed_s = elapsed % 60
elapsed_text = f"{elapsed_s:.2f}s"
if (elapsed_m > 0):
elapsed_text = f"{elapsed_m}m "+elapsed_text
if run_memmon:
mem_stats = {k: -(v//-(1024*1024)) for k, v in shared.mem_mon.stop().items()}
active_peak = mem_stats['active_peak']
reserved_peak = mem_stats['reserved_peak']
sys_peak = mem_stats['system_peak']
sys_total = mem_stats['total']
sys_pct = round(sys_peak/max(sys_total, 1) * 100, 2)
vram_html = f"<p class='vram'>Torch active/reserved: {active_peak}/{reserved_peak} MiB, <wbr>Sys VRAM: {sys_peak}/{sys_total} MiB ({sys_pct}%)</p>"
else:
vram_html = ''
# last item is always HTML
res[-1] += f"<div class='performance'><p class='time'>Time taken: <wbr>{elapsed_text}</p>{vram_html}</div>"
shared.state.skipped = False
shared.state.interrupted = False
shared.state.job_count = 0
return tuple(res)
return f
def check_progress_call(id_part):
if shared.state.job_count == 0:
return "", gr_show(False), gr_show(False), gr_show(False)
progress = 0
if shared.state.job_count > 0:
progress += shared.state.job_no / shared.state.job_count
if shared.state.sampling_steps > 0:
progress += 1 / shared.state.job_count * shared.state.sampling_step / shared.state.sampling_steps
progress = min(progress, 1)
progressbar = ""
if opts.show_progressbar:
progressbar = f"""<div class='progressDiv'><div class='progress' style="width:{progress * 100}%">{str(int(progress*100))+"%" if progress > 0.01 else ""}</div></div>"""
image = gr_show(False)
preview_visibility = gr_show(False)
if opts.show_progress_every_n_steps > 0:
if shared.parallel_processing_allowed:
if shared.state.sampling_step - shared.state.current_image_sampling_step >= opts.show_progress_every_n_steps and shared.state.current_latent is not None:
shared.state.current_image = modules.sd_samplers.sample_to_image(shared.state.current_latent)
shared.state.current_image_sampling_step = shared.state.sampling_step
image = shared.state.current_image
if image is None:
image = gr.update(value=None)
else:
preview_visibility = gr_show(True)
if shared.state.textinfo is not None:
textinfo_result = gr.HTML.update(value=shared.state.textinfo, visible=True)
else:
textinfo_result = gr_show(False)
return f"<span id='{id_part}_progress_span' style='display: none'>{time.time()}</span><p>{progressbar}</p>", preview_visibility, image, textinfo_result
def check_progress_call_initial(id_part):
shared.state.job_count = -1
shared.state.current_latent = None
shared.state.current_image = None
shared.state.textinfo = None
return check_progress_call(id_part)
def roll_artist(prompt):
allowed_cats = set([x for x in shared.artist_db.categories() if len(opts.random_artist_categories)==0 or x in opts.random_artist_categories])
artist = random.choice([x for x in shared.artist_db.artists if x.category in allowed_cats])
return prompt + ", " + artist.name if prompt != '' else artist.name
def visit(x, func, path=""):
if hasattr(x, 'children'):
for c in x.children:
visit(c, func, path)
elif x.label is not None:
func(path + "/" + str(x.label), x)
def add_style(name: str, prompt: str, negative_prompt: str):
if name is None:
return [gr_show(), gr_show()]
style = modules.styles.PromptStyle(name, prompt, negative_prompt)
shared.prompt_styles.styles[style.name] = style
# Save all loaded prompt styles: this allows us to update the storage format in the future more easily, because we
# reserialize all styles every time we save them
shared.prompt_styles.save_styles(shared.styles_filename)
return [gr.Dropdown.update(visible=True, choices=list(shared.prompt_styles.styles)) for _ in range(4)]
def apply_styles(prompt, prompt_neg, style1_name, style2_name):
prompt = shared.prompt_styles.apply_styles_to_prompt(prompt, [style1_name, style2_name])
prompt_neg = shared.prompt_styles.apply_negative_styles_to_prompt(prompt_neg, [style1_name, style2_name])
return [gr.Textbox.update(value=prompt), gr.Textbox.update(value=prompt_neg), gr.Dropdown.update(value="None"), gr.Dropdown.update(value="None")]
def interrogate(image):
prompt = shared.interrogator.interrogate(image)
return gr_show(True) if prompt is None else prompt
def interrogate_deepbooru(image):
prompt = get_deepbooru_tags(image, opts.interrogate_deepbooru_score_threshold)
return gr_show(True) if prompt is None else prompt
def create_seed_inputs():
with gr.Row():
with gr.Box():
with gr.Row(elem_id='seed_row'):
seed = (gr.Textbox if cmd_opts.use_textbox_seed else gr.Number)(label='Seed', value=-1)
seed.style(container=False)
random_seed = gr.Button(random_symbol, elem_id='random_seed')
reuse_seed = gr.Button(reuse_symbol, elem_id='reuse_seed')
with gr.Box(elem_id='subseed_show_box'):
seed_checkbox = gr.Checkbox(label='Extra', elem_id='subseed_show', value=False)
# Components to show/hide based on the 'Extra' checkbox
seed_extras = []
with gr.Row(visible=False) as seed_extra_row_1:
seed_extras.append(seed_extra_row_1)
with gr.Box():
with gr.Row(elem_id='subseed_row'):
subseed = gr.Number(label='Variation seed', value=-1)
subseed.style(container=False)
random_subseed = gr.Button(random_symbol, elem_id='random_subseed')
reuse_subseed = gr.Button(reuse_symbol, elem_id='reuse_subseed')
subseed_strength = gr.Slider(label='Variation strength', value=0.0, minimum=0, maximum=1, step=0.01)
with gr.Row(visible=False) as seed_extra_row_2:
seed_extras.append(seed_extra_row_2)
seed_resize_from_w = gr.Slider(minimum=0, maximum=2048, step=64, label="Resize seed from width", value=0)
seed_resize_from_h = gr.Slider(minimum=0, maximum=2048, step=64, label="Resize seed from height", value=0)
random_seed.click(fn=lambda: -1, show_progress=False, inputs=[], outputs=[seed])
random_subseed.click(fn=lambda: -1, show_progress=False, inputs=[], outputs=[subseed])
def change_visibility(show):
return {comp: gr_show(show) for comp in seed_extras}
seed_checkbox.change(change_visibility, show_progress=False, inputs=[seed_checkbox], outputs=seed_extras)
return seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox
def connect_reuse_seed(seed: gr.Number, reuse_seed: gr.Button, generation_info: gr.Textbox, dummy_component, is_subseed):
""" Connects a 'reuse (sub)seed' button's click event so that it copies last used
(sub)seed value from generation info the to the seed field. If copying subseed and subseed strength
was 0, i.e. no variation seed was used, it copies the normal seed value instead."""
def copy_seed(gen_info_string: str, index):
res = -1
try:
gen_info = json.loads(gen_info_string)
index -= gen_info.get('index_of_first_image', 0)
if is_subseed and gen_info.get('subseed_strength', 0) > 0:
all_subseeds = gen_info.get('all_subseeds', [-1])
res = all_subseeds[index if 0 <= index < len(all_subseeds) else 0]
else:
all_seeds = gen_info.get('all_seeds', [-1])
res = all_seeds[index if 0 <= index < len(all_seeds) else 0]
except json.decoder.JSONDecodeError as e:
if gen_info_string != '':
print("Error parsing JSON generation info:", file=sys.stderr)
print(gen_info_string, file=sys.stderr)
return [res, gr_show(False)]
reuse_seed.click(
fn=copy_seed,
_js="(x, y) => [x, selected_gallery_index()]",
show_progress=False,
inputs=[generation_info, dummy_component],
outputs=[seed, dummy_component]
)
def update_token_counter(text, steps):
try:
_, prompt_flat_list, _ = prompt_parser.get_multicond_prompt_list([text])
prompt_schedules = prompt_parser.get_learned_conditioning_prompt_schedules(prompt_flat_list, steps)
except Exception:
# a parsing error can happen here during typing, and we don't want to bother the user with
# messages related to it in console
prompt_schedules = [[[steps, text]]]
flat_prompts = reduce(lambda list1, list2: list1+list2, prompt_schedules)
prompts = [prompt_text for step, prompt_text in flat_prompts]
tokens, token_count, max_length = max([model_hijack.tokenize(prompt) for prompt in prompts], key=lambda args: args[1])
style_class = ' class="red"' if (token_count > max_length) else ""
return f"<span {style_class}>{token_count}/{max_length}</span>"
def create_toprow(is_img2img):
id_part = "img2img" if is_img2img else "txt2img"
with gr.Row(elem_id="toprow"):
with gr.Column(scale=4):
with gr.Row():
with gr.Column(scale=80):
with gr.Row():
prompt = gr.Textbox(label="Prompt", elem_id=f"{id_part}_prompt", show_label=False, placeholder="Prompt", lines=2)
with gr.Column(scale=1, elem_id="roll_col"):
roll = gr.Button(value=art_symbol, elem_id="roll", visible=len(shared.artist_db.artists) > 0)
paste = gr.Button(value=paste_symbol, elem_id="paste")
token_counter = gr.HTML(value="<span></span>", elem_id=f"{id_part}_token_counter")
token_button = gr.Button(visible=False, elem_id=f"{id_part}_token_button")
with gr.Column(scale=10, elem_id="style_pos_col"):
prompt_style = gr.Dropdown(label="Style 1", elem_id=f"{id_part}_style_index", choices=[k for k, v in shared.prompt_styles.styles.items()], value=next(iter(shared.prompt_styles.styles.keys())), visible=len(shared.prompt_styles.styles) > 1)
with gr.Row():
with gr.Column(scale=8):
with gr.Row():
negative_prompt = gr.Textbox(label="Negative prompt", elem_id="negative_prompt", show_label=False, placeholder="Negative prompt", lines=2)
with gr.Column(scale=1, elem_id="roll_col"):
sh = gr.Button(elem_id="sh", visible=True)
with gr.Column(scale=1, elem_id="style_neg_col"):
prompt_style2 = gr.Dropdown(label="Style 2", elem_id=f"{id_part}_style2_index", choices=[k for k, v in shared.prompt_styles.styles.items()], value=next(iter(shared.prompt_styles.styles.keys())), visible=len(shared.prompt_styles.styles) > 1)
with gr.Column(scale=1):
with gr.Row():
skip = gr.Button('Skip', elem_id=f"{id_part}_skip")
interrupt = gr.Button('Interrupt', elem_id=f"{id_part}_interrupt")
submit = gr.Button('Generate', elem_id=f"{id_part}_generate", variant='primary')
skip.click(
fn=lambda: shared.state.skip(),
inputs=[],
outputs=[],
)
interrupt.click(
fn=lambda: shared.state.interrupt(),
inputs=[],
outputs=[],
)
with gr.Row(scale=1):
if is_img2img:
interrogate = gr.Button('Interrogate\nCLIP', elem_id="interrogate")
if cmd_opts.deepdanbooru:
deepbooru = gr.Button('Interrogate\nDeepBooru', elem_id="deepbooru")
else:
deepbooru = None
else:
interrogate = None
deepbooru = None
prompt_style_apply = gr.Button('Apply style', elem_id="style_apply")
save_style = gr.Button('Create style', elem_id="style_create")
return prompt, roll, prompt_style, negative_prompt, prompt_style2, submit, interrogate, deepbooru, prompt_style_apply, save_style, paste, token_counter, token_button
def setup_progressbar(progressbar, preview, id_part, textinfo=None):
if textinfo is None:
textinfo = gr.HTML(visible=False)
check_progress = gr.Button('Check progress', elem_id=f"{id_part}_check_progress", visible=False)
check_progress.click(
fn=lambda: check_progress_call(id_part),
show_progress=False,
inputs=[],
outputs=[progressbar, preview, preview, textinfo],
)
check_progress_initial = gr.Button('Check progress (first)', elem_id=f"{id_part}_check_progress_initial", visible=False)
check_progress_initial.click(
fn=lambda: check_progress_call_initial(id_part),
show_progress=False,
inputs=[],
outputs=[progressbar, preview, preview, textinfo],
)
def create_ui(wrap_gradio_gpu_call):
import modules.img2img
import modules.txt2img
with gr.Blocks(analytics_enabled=False) as txt2img_interface:
txt2img_prompt, roll, txt2img_prompt_style, txt2img_negative_prompt, txt2img_prompt_style2, submit, _, _, txt2img_prompt_style_apply, txt2img_save_style, paste, token_counter, token_button = create_toprow(is_img2img=False)
dummy_component = gr.Label(visible=False)
with gr.Row(elem_id='txt2img_progress_row'):
with gr.Column(scale=1):
pass
with gr.Column(scale=1):
progressbar = gr.HTML(elem_id="txt2img_progressbar")
txt2img_preview = gr.Image(elem_id='txt2img_preview', visible=False)
setup_progressbar(progressbar, txt2img_preview, 'txt2img')
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
steps = gr.Slider(minimum=1, maximum=150, step=1, label="Sampling Steps", value=20)
sampler_index = gr.Radio(label='Sampling method', elem_id="txt2img_sampling", choices=[x.name for x in samplers], value=samplers[0].name, type="index")
with gr.Group():
width = gr.Slider(minimum=64, maximum=2048, step=64, label="Width", value=512)
height = gr.Slider(minimum=64, maximum=2048, step=64, label="Height", value=512)
with gr.Row():
restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1)
tiling = gr.Checkbox(label='Tiling', value=False)
enable_hr = gr.Checkbox(label='Highres. fix', value=False)
with gr.Row(visible=False) as hr_options:
scale_latent = gr.Checkbox(label='Scale latent', value=False)
denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7)
with gr.Row():
batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1)
batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1)
cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0)
seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs()
with gr.Group():
custom_inputs = modules.scripts.scripts_txt2img.setup_ui(is_img2img=False)
with gr.Column(variant='panel'):
with gr.Group():
txt2img_preview = gr.Image(elem_id='txt2img_preview', visible=False)
txt2img_gallery = gr.Gallery(label='Output', show_label=False, elem_id='txt2img_gallery').style(grid=4)
with gr.Group():
with gr.Row():
save = gr.Button('Save')
send_to_img2img = gr.Button('Send to img2img')
send_to_inpaint = gr.Button('Send to inpaint')
send_to_extras = gr.Button('Send to extras')
button_id = "hidden_element" if shared.cmd_opts.hide_ui_dir_config else 'open_folder'
open_txt2img_folder = gr.Button(folder_symbol, elem_id=button_id)
with gr.Row():
do_make_zip = gr.Checkbox(label="Make Zip when Save?", value=False)
with gr.Row():
download_files = gr.File(None, file_count="multiple", interactive=False, show_label=False, visible=False)
with gr.Group():
html_info = gr.HTML()
generation_info = gr.Textbox(visible=False)
connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False)
connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True)
txt2img_args = dict(
fn=wrap_gradio_gpu_call(modules.txt2img.txt2img),
_js="submit",
inputs=[
txt2img_prompt,
txt2img_negative_prompt,
txt2img_prompt_style,
txt2img_prompt_style2,
steps,
sampler_index,
restore_faces,
tiling,
batch_count,
batch_size,
cfg_scale,
seed,
subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox,
height,
width,
enable_hr,
scale_latent,
denoising_strength,
] + custom_inputs,
outputs=[
txt2img_gallery,
generation_info,
html_info
],
show_progress=False,
)
txt2img_prompt.submit(**txt2img_args)
submit.click(**txt2img_args)
enable_hr.change(
fn=lambda x: gr_show(x),
inputs=[enable_hr],
outputs=[hr_options],
)
save.click(
fn=wrap_gradio_call(save_files),
_js="(x, y, z, w) => [x, y, z, selected_gallery_index()]",
inputs=[
generation_info,
txt2img_gallery,
do_make_zip,
html_info,
],
outputs=[
download_files,
html_info,
html_info,
html_info,
]
)
roll.click(
fn=roll_artist,
_js="update_txt2img_tokens",
inputs=[
txt2img_prompt,
],
outputs=[
txt2img_prompt,
]
)
txt2img_paste_fields = [
(txt2img_prompt, "Prompt"),
(txt2img_negative_prompt, "Negative prompt"),
(steps, "Steps"),
(sampler_index, "Sampler"),
(restore_faces, "Face restoration"),
(cfg_scale, "CFG scale"),
(seed, "Seed"),
(width, "Size-1"),
(height, "Size-2"),
(batch_size, "Batch size"),
(subseed, "Variation seed"),
(subseed_strength, "Variation seed strength"),
(seed_resize_from_w, "Seed resize from-1"),
(seed_resize_from_h, "Seed resize from-2"),
(denoising_strength, "Denoising strength"),
(enable_hr, lambda d: "Denoising strength" in d),
(hr_options, lambda d: gr.Row.update(visible="Denoising strength" in d)),
]
modules.generation_parameters_copypaste.connect_paste(paste, txt2img_paste_fields, txt2img_prompt)
token_button.click(fn=update_token_counter, inputs=[txt2img_prompt, steps], outputs=[token_counter])
with gr.Blocks(analytics_enabled=False) as img2img_interface:
img2img_prompt, roll, img2img_prompt_style, img2img_negative_prompt, img2img_prompt_style2, submit, img2img_interrogate, img2img_deepbooru, img2img_prompt_style_apply, img2img_save_style, paste, token_counter, token_button = create_toprow(is_img2img=True)
with gr.Row(elem_id='img2img_progress_row'):
with gr.Column(scale=1):
pass
with gr.Column(scale=1):
progressbar = gr.HTML(elem_id="img2img_progressbar")
img2img_preview = gr.Image(elem_id='img2img_preview', visible=False)
setup_progressbar(progressbar, img2img_preview, 'img2img')
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
with gr.Tabs(elem_id="mode_img2img") as tabs_img2img_mode:
with gr.TabItem('img2img', id='img2img'):
init_img = gr.Image(label="Image for img2img", elem_id="img2img_image", show_label=False, source="upload", interactive=True, type="pil", tool=cmd_opts.gradio_img2img_tool)
with gr.TabItem('Inpaint', id='inpaint'):
init_img_with_mask = gr.Image(label="Image for inpainting with mask", show_label=False, elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool="sketch", image_mode="RGBA")
init_img_inpaint = gr.Image(label="Image for img2img", show_label=False, source="upload", interactive=True, type="pil", visible=False, elem_id="img_inpaint_base")
init_mask_inpaint = gr.Image(label="Mask", source="upload", interactive=True, type="pil", visible=False, elem_id="img_inpaint_mask")
mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4)
with gr.Row():
mask_mode = gr.Radio(label="Mask mode", show_label=False, choices=["Draw mask", "Upload mask"], type="index", value="Draw mask", elem_id="mask_mode")
inpainting_mask_invert = gr.Radio(label='Masking mode', show_label=False, choices=['Inpaint masked', 'Inpaint not masked'], value='Inpaint masked', type="index")
inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='original', type="index")
with gr.Row():
inpaint_full_res = gr.Checkbox(label='Inpaint at full resolution', value=False)
inpaint_full_res_padding = gr.Slider(label='Inpaint at full resolution padding, pixels', minimum=0, maximum=256, step=4, value=32)
with gr.TabItem('Batch img2img', id='batch'):
hidden = '<br>Disabled when launched with --hide-ui-dir-config.' if shared.cmd_opts.hide_ui_dir_config else ''
gr.HTML(f"<p class=\"text-gray-500\">Process images in a directory on the same machine where the server is running.<br>Use an empty output directory to save pictures normally instead of writing to the output directory.{hidden}</p>")
img2img_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs)
img2img_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs)
with gr.Row():
resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", show_label=False, choices=["Just resize", "Crop and resize", "Resize and fill"], type="index", value="Just resize")
steps = gr.Slider(minimum=1, maximum=150, step=1, label="Sampling Steps", value=20)
sampler_index = gr.Radio(label='Sampling method', choices=[x.name for x in samplers_for_img2img], value=samplers_for_img2img[0].name, type="index")
with gr.Group():
width = gr.Slider(minimum=64, maximum=2048, step=64, label="Width", value=512)
height = gr.Slider(minimum=64, maximum=2048, step=64, label="Height", value=512)
with gr.Row():
restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1)
tiling = gr.Checkbox(label='Tiling', value=False)
with gr.Row():
batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1)
batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1)
with gr.Group():
cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0)
denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75)
seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs()
with gr.Group():
custom_inputs = modules.scripts.scripts_img2img.setup_ui(is_img2img=True)
with gr.Column(variant='panel'):
with gr.Group():
img2img_preview = gr.Image(elem_id='img2img_preview', visible=False)
img2img_gallery = gr.Gallery(label='Output', show_label=False, elem_id='img2img_gallery').style(grid=4)
with gr.Group():
with gr.Row():
save = gr.Button('Save')
img2img_send_to_img2img = gr.Button('Send to img2img')
img2img_send_to_inpaint = gr.Button('Send to inpaint')
img2img_send_to_extras = gr.Button('Send to extras')
button_id = "hidden_element" if shared.cmd_opts.hide_ui_dir_config else 'open_folder'
open_img2img_folder = gr.Button(folder_symbol, elem_id=button_id)
with gr.Row():
do_make_zip = gr.Checkbox(label="Make Zip when Save?", value=False)
with gr.Row():
download_files = gr.File(None, file_count="multiple", interactive=False, show_label=False, visible=False)
with gr.Group():
html_info = gr.HTML()
generation_info = gr.Textbox(visible=False)
connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False)
connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True)
mask_mode.change(
lambda mode, img: {
init_img_with_mask: gr_show(mode == 0),
init_img_inpaint: gr_show(mode == 1),
init_mask_inpaint: gr_show(mode == 1),
},
inputs=[mask_mode, init_img_with_mask],
outputs=[
init_img_with_mask,
init_img_inpaint,
init_mask_inpaint,
],
)
img2img_args = dict(
fn=wrap_gradio_gpu_call(modules.img2img.img2img),
_js="submit_img2img",
inputs=[
dummy_component,
img2img_prompt,
img2img_negative_prompt,
img2img_prompt_style,
img2img_prompt_style2,
init_img,
init_img_with_mask,
init_img_inpaint,
init_mask_inpaint,
mask_mode,
steps,
sampler_index,
mask_blur,
inpainting_fill,
restore_faces,
tiling,
batch_count,
batch_size,
cfg_scale,
denoising_strength,
seed,
subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox,
height,
width,
resize_mode,
inpaint_full_res,
inpaint_full_res_padding,
inpainting_mask_invert,
img2img_batch_input_dir,
img2img_batch_output_dir,
] + custom_inputs,
outputs=[
img2img_gallery,
generation_info,
html_info
],
show_progress=False,
)
img2img_prompt.submit(**img2img_args)
submit.click(**img2img_args)
img2img_interrogate.click(
fn=interrogate,
inputs=[init_img],
outputs=[img2img_prompt],
)
if cmd_opts.deepdanbooru:
img2img_deepbooru.click(
fn=interrogate_deepbooru,
inputs=[init_img],
outputs=[img2img_prompt],
)
save.click(
fn=wrap_gradio_call(save_files),
_js="(x, y, z, w) => [x, y, z, selected_gallery_index()]",
inputs=[
generation_info,
img2img_gallery,
do_make_zip,
html_info,
],
outputs=[
download_files,
html_info,
html_info,
html_info,
]
)
roll.click(
fn=roll_artist,
_js="update_img2img_tokens",
inputs=[
img2img_prompt,
],
outputs=[
img2img_prompt,
]
)
prompts = [(txt2img_prompt, txt2img_negative_prompt), (img2img_prompt, img2img_negative_prompt)]
style_dropdowns = [(txt2img_prompt_style, txt2img_prompt_style2), (img2img_prompt_style, img2img_prompt_style2)]
style_js_funcs = ["update_txt2img_tokens", "update_img2img_tokens"]
for button, (prompt, negative_prompt) in zip([txt2img_save_style, img2img_save_style], prompts):
button.click(
fn=add_style,
_js="ask_for_style_name",
# Have to pass empty dummy component here, because the JavaScript and Python function have to accept
# the same number of parameters, but we only know the style-name after the JavaScript prompt
inputs=[dummy_component, prompt, negative_prompt],
outputs=[txt2img_prompt_style, img2img_prompt_style, txt2img_prompt_style2, img2img_prompt_style2],
)
for button, (prompt, negative_prompt), (style1, style2), js_func in zip([txt2img_prompt_style_apply, img2img_prompt_style_apply], prompts, style_dropdowns, style_js_funcs):
button.click(
fn=apply_styles,
_js=js_func,
inputs=[prompt, negative_prompt, style1, style2],
outputs=[prompt, negative_prompt, style1, style2],
)
img2img_paste_fields = [
(img2img_prompt, "Prompt"),
(img2img_negative_prompt, "Negative prompt"),
(steps, "Steps"),
(sampler_index, "Sampler"),
(restore_faces, "Face restoration"),
(cfg_scale, "CFG scale"),
(seed, "Seed"),
(width, "Size-1"),
(height, "Size-2"),
(batch_size, "Batch size"),
(subseed, "Variation seed"),
(subseed_strength, "Variation seed strength"),
(seed_resize_from_w, "Seed resize from-1"),
(seed_resize_from_h, "Seed resize from-2"),
(denoising_strength, "Denoising strength"),
]
modules.generation_parameters_copypaste.connect_paste(paste, img2img_paste_fields, img2img_prompt)
token_button.click(fn=update_token_counter, inputs=[img2img_prompt, steps], outputs=[token_counter])
with gr.Blocks(analytics_enabled=False) as extras_interface:
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
with gr.Tabs(elem_id="mode_extras"):
with gr.TabItem('Single Image'):
extras_image = gr.Image(label="Source", source="upload", interactive=True, type="pil")
with gr.TabItem('Batch Process'):
image_batch = gr.File(label="Batch Process", file_count="multiple", interactive=True, type="file")
upscaling_resize = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Resize", value=2)
with gr.Group():
extras_upscaler_1 = gr.Radio(label='Upscaler 1', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index")
with gr.Group():
extras_upscaler_2 = gr.Radio(label='Upscaler 2', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index")
extras_upscaler_2_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="Upscaler 2 visibility", value=1)
with gr.Group():
gfpgan_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="GFPGAN visibility", value=0, interactive=modules.gfpgan_model.have_gfpgan)
with gr.Group():
codeformer_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="CodeFormer visibility", value=0, interactive=modules.codeformer_model.have_codeformer)
codeformer_weight = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="CodeFormer weight (0 = maximum effect, 1 = minimum effect)", value=0, interactive=modules.codeformer_model.have_codeformer)
submit = gr.Button('Generate', elem_id="extras_generate", variant='primary')
with gr.Column(variant='panel'):
result_images = gr.Gallery(label="Result", show_label=False)
html_info_x = gr.HTML()
html_info = gr.HTML()
extras_send_to_img2img = gr.Button('Send to img2img')
extras_send_to_inpaint = gr.Button('Send to inpaint')
button_id = "hidden_element" if shared.cmd_opts.hide_ui_dir_config else ''
open_extras_folder = gr.Button('Open output directory', elem_id=button_id)
submit.click(
fn=wrap_gradio_gpu_call(modules.extras.run_extras),
_js="get_extras_tab_index",
inputs=[
dummy_component,
extras_image,
image_batch,
gfpgan_visibility,
codeformer_visibility,
codeformer_weight,
upscaling_resize,
extras_upscaler_1,
extras_upscaler_2,
extras_upscaler_2_visibility,
],
outputs=[
result_images,
html_info_x,
html_info,
]
)
extras_send_to_img2img.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_img2img",
inputs=[result_images],
outputs=[init_img],
)
extras_send_to_inpaint.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_inpaint",
inputs=[result_images],
outputs=[init_img_with_mask],
)
with gr.Blocks(analytics_enabled=False) as pnginfo_interface:
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
image = gr.Image(elem_id="pnginfo_image", label="Source", source="upload", interactive=True, type="pil")
with gr.Column(variant='panel'):
html = gr.HTML()
generation_info = gr.Textbox(visible=False)
html2 = gr.HTML()
with gr.Row():
pnginfo_send_to_txt2img = gr.Button('Send to txt2img')
pnginfo_send_to_img2img = gr.Button('Send to img2img')
image.change(
fn=wrap_gradio_call(modules.extras.run_pnginfo),
inputs=[image],
outputs=[html, generation_info, html2],
)
with gr.Blocks() as modelmerger_interface:
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
gr.HTML(value="<p>A merger of the two checkpoints will be generated in your <b>checkpoint</b> directory.</p>")
with gr.Row():
primary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_primary_model_name", label="Primary Model Name")
secondary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_secondary_model_name", label="Secondary Model Name")
custom_name = gr.Textbox(label="Custom Name (Optional)")
interp_amount = gr.Slider(minimum=0.0, maximum=1.0, step=0.05, label='Interpolation Amount', value=0.3)
interp_method = gr.Radio(choices=["Weighted Sum", "Sigmoid", "Inverse Sigmoid"], value="Weighted Sum", label="Interpolation Method")
save_as_half = gr.Checkbox(value=False, label="Save as float16")
modelmerger_merge = gr.Button(elem_id="modelmerger_merge", label="Merge", variant='primary')
with gr.Column(variant='panel'):
submit_result = gr.Textbox(elem_id="modelmerger_result", show_label=False)
sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings()
with gr.Blocks() as textual_inversion_interface:
with gr.Row().style(equal_height=False):
with gr.Column():
with gr.Group():
gr.HTML(value="<p style='margin-bottom: 0.7em'>See <b><a href=\"https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Textual-Inversion\">wiki</a></b> for detailed explanation.</p>")
gr.HTML(value="<p style='margin-bottom: 0.7em'>Create a new embedding</p>")
new_embedding_name = gr.Textbox(label="Name")
initialization_text = gr.Textbox(label="Initialization text", value="*")
nvpt = gr.Slider(label="Number of vectors per token", minimum=1, maximum=75, step=1, value=1)
with gr.Row():
with gr.Column(scale=3):
gr.HTML(value="")
with gr.Column():
create_embedding = gr.Button(value="Create embedding", variant='primary')
with gr.Group():
gr.HTML(value="<p style='margin-bottom: 0.7em'>Create a new hypernetwork</p>")
new_hypernetwork_name = gr.Textbox(label="Name")
new_hypernetwork_sizes = gr.CheckboxGroup(label="Modules", value=["768", "320", "640", "1280"], choices=["768", "320", "640", "1280"])
with gr.Row():
with gr.Column(scale=3):
gr.HTML(value="")
with gr.Column():
create_hypernetwork = gr.Button(value="Create hypernetwork", variant='primary')
with gr.Group():
gr.HTML(value="<p style='margin-bottom: 0.7em'>Preprocess images</p>")
process_src = gr.Textbox(label='Source directory')
process_dst = gr.Textbox(label='Destination directory')
process_width = gr.Slider(minimum=64, maximum=2048, step=64, label="Width", value=512)
process_height = gr.Slider(minimum=64, maximum=2048, step=64, label="Height", value=512)
with gr.Row():
process_flip = gr.Checkbox(label='Create flipped copies')
process_split = gr.Checkbox(label='Split oversized images into two')
process_caption = gr.Checkbox(label='Use BLIP caption as filename')
with gr.Row():
with gr.Column(scale=3):
gr.HTML(value="")
with gr.Column():
run_preprocess = gr.Button(value="Preprocess", variant='primary')
with gr.Group():
gr.HTML(value="<p style='margin-bottom: 0.7em'>Train an embedding; must specify a directory with a set of 1:1 ratio images</p>")
train_embedding_name = gr.Dropdown(label='Embedding', choices=sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys()))
train_hypernetwork_name = gr.Dropdown(label='Hypernetwork', choices=[x for x in shared.hypernetworks.keys()])
learn_rate = gr.Textbox(label='Learning rate', placeholder="Learning rate", value="0.005")
dataset_directory = gr.Textbox(label='Dataset directory', placeholder="Path to directory with input images")
log_directory = gr.Textbox(label='Log directory', placeholder="Path to directory where to write outputs", value="textual_inversion")
template_file = gr.Textbox(label='Prompt template file', value=os.path.join(script_path, "textual_inversion_templates", "style_filewords.txt"))
training_width = gr.Slider(minimum=64, maximum=2048, step=64, label="Width", value=512)
training_height = gr.Slider(minimum=64, maximum=2048, step=64, label="Height", value=512)
steps = gr.Number(label='Max steps', value=100000, precision=0)
num_repeats = gr.Number(label='Number of repeats for a single input image per epoch', value=100, precision=0)
create_image_every = gr.Number(label='Save an image to log directory every N steps, 0 to disable', value=500, precision=0)
save_embedding_every = gr.Number(label='Save a copy of embedding to log directory every N steps, 0 to disable', value=500, precision=0)
preview_image_prompt = gr.Textbox(label='Preview prompt', value="")
with gr.Row():
interrupt_training = gr.Button(value="Interrupt")
train_hypernetwork = gr.Button(value="Train Hypernetwork", variant='primary')
train_embedding = gr.Button(value="Train Embedding", variant='primary')
with gr.Column():
progressbar = gr.HTML(elem_id="ti_progressbar")
ti_output = gr.Text(elem_id="ti_output", value="", show_label=False)
ti_gallery = gr.Gallery(label='Output', show_label=False, elem_id='ti_gallery').style(grid=4)
ti_preview = gr.Image(elem_id='ti_preview', visible=False)
ti_progress = gr.HTML(elem_id="ti_progress", value="")
ti_outcome = gr.HTML(elem_id="ti_error", value="")
setup_progressbar(progressbar, ti_preview, 'ti', textinfo=ti_progress)
create_embedding.click(
fn=modules.textual_inversion.ui.create_embedding,
inputs=[
new_embedding_name,
initialization_text,
nvpt,
],
outputs=[
train_embedding_name,
ti_output,
ti_outcome,
]
)
create_hypernetwork.click(
fn=modules.hypernetworks.ui.create_hypernetwork,
inputs=[
new_hypernetwork_name,
new_hypernetwork_sizes,
],
outputs=[
train_hypernetwork_name,
ti_output,
ti_outcome,
]
)
run_preprocess.click(
fn=wrap_gradio_gpu_call(modules.textual_inversion.ui.preprocess, extra_outputs=[gr.update()]),
_js="start_training_textual_inversion",
inputs=[
process_src,
process_dst,
process_width,
process_height,
process_flip,
process_split,
process_caption,
],
outputs=[
ti_output,
ti_outcome,
],
)
train_embedding.click(
fn=wrap_gradio_gpu_call(modules.textual_inversion.ui.train_embedding, extra_outputs=[gr.update()]),
_js="start_training_textual_inversion",
inputs=[
train_embedding_name,
learn_rate,
dataset_directory,
log_directory,
training_width,
training_height,
steps,
num_repeats,
create_image_every,
save_embedding_every,
template_file,
preview_image_prompt,
],
outputs=[
ti_output,
ti_outcome,
]
)
train_hypernetwork.click(
fn=wrap_gradio_gpu_call(modules.hypernetworks.ui.train_hypernetwork, extra_outputs=[gr.update()]),
_js="start_training_textual_inversion",
inputs=[
train_hypernetwork_name,
learn_rate,
dataset_directory,
log_directory,
steps,
create_image_every,
save_embedding_every,
template_file,
preview_image_prompt,
],
outputs=[
ti_output,
ti_outcome,
]
)
interrupt_training.click(
fn=lambda: shared.state.interrupt(),
inputs=[],
outputs=[],
)
def create_setting_component(key):
def fun():
return opts.data[key] if key in opts.data else opts.data_labels[key].default
info = opts.data_labels[key]
t = type(info.default)
args = info.component_args() if callable(info.component_args) else info.component_args
if info.component is not None:
comp = info.component
elif t == str:
comp = gr.Textbox
elif t == int:
comp = gr.Number
elif t == bool:
comp = gr.Checkbox
else:
raise Exception(f'bad options item type: {str(t)} for key {key}')
return comp(label=info.label, value=fun, **(args or {}))
components = []
component_dict = {}
def open_folder(f):
if not os.path.isdir(f):
print(f"""
WARNING
An open_folder request was made with an argument that is not a folder.
This could be an error or a malicious attempt to run code on your computer.
Requested path was: {f}
""", file=sys.stderr)
return
if not shared.cmd_opts.hide_ui_dir_config:
path = os.path.normpath(f)
if platform.system() == "Windows":
os.startfile(path)
elif platform.system() == "Darwin":
sp.Popen(["open", path])
else:
sp.Popen(["xdg-open", path])
def run_settings(*args):
changed = 0
for key, value, comp in zip(opts.data_labels.keys(), args, components):
if comp != dummy_component and not opts.same_type(value, opts.data_labels[key].default):
return f"Bad value for setting {key}: {value}; expecting {type(opts.data_labels[key].default).__name__}", opts.dumpjson()
for key, value, comp in zip(opts.data_labels.keys(), args, components):
if comp == dummy_component:
continue
comp_args = opts.data_labels[key].component_args
if comp_args and isinstance(comp_args, dict) and comp_args.get('visible') is False:
continue
oldval = opts.data.get(key, None)
opts.data[key] = value
if oldval != value:
if opts.data_labels[key].onchange is not None:
opts.data_labels[key].onchange()
changed += 1
opts.save(shared.config_filename)
return f'{changed} settings changed.', opts.dumpjson()
def run_settings_single(value, key):
if not opts.same_type(value, opts.data_labels[key].default):
return gr.update(visible=True), opts.dumpjson()
oldval = opts.data.get(key, None)
opts.data[key] = value
if oldval != value:
if opts.data_labels[key].onchange is not None:
opts.data_labels[key].onchange()
opts.save(shared.config_filename)
return gr.update(value=value), opts.dumpjson()
with gr.Blocks(analytics_enabled=False) as settings_interface:
settings_submit = gr.Button(value="Apply settings", variant='primary')
result = gr.HTML()
settings_cols = 3
items_per_col = int(len(opts.data_labels) * 0.9 / settings_cols)
quicksettings_list = []
cols_displayed = 0
items_displayed = 0
previous_section = None
column = None
with gr.Row(elem_id="settings").style(equal_height=False):
for i, (k, item) in enumerate(opts.data_labels.items()):
if previous_section != item.section:
if cols_displayed < settings_cols and (items_displayed >= items_per_col or previous_section is None):
if column is not None:
column.__exit__()
column = gr.Column(variant='panel')
column.__enter__()
items_displayed = 0
cols_displayed += 1
previous_section = item.section
gr.HTML(elem_id="settings_header_text_{}".format(item.section[0]), value='<h1 class="gr-button-lg">{}</h1>'.format(item.section[1]))
if item.show_on_main_page:
quicksettings_list.append((i, k, item))
components.append(dummy_component)
else:
component = create_setting_component(k)
component_dict[k] = component
components.append(component)
items_displayed += 1
request_notifications = gr.Button(value='Request browser notifications', elem_id="request_notifications")
request_notifications.click(
fn=lambda: None,
inputs=[],
outputs=[],
_js='function(){}'
)
with gr.Row():
reload_script_bodies = gr.Button(value='Reload custom script bodies (No ui updates, No restart)', variant='secondary')
restart_gradio = gr.Button(value='Restart Gradio and Refresh components (Custom Scripts, ui.py, js and css only)', variant='primary')
def reload_scripts():
modules.scripts.reload_script_body_only()
reload_script_bodies.click(
fn=reload_scripts,
inputs=[],
outputs=[],
_js='function(){}'
)
def request_restart():
shared.state.interrupt()
settings_interface.gradio_ref.do_restart = True
restart_gradio.click(
fn=request_restart,
inputs=[],
outputs=[],
_js='function(){restart_reload()}'
)
if column is not None:
column.__exit__()
interfaces = [
(txt2img_interface, "txt2img", "txt2img"),
(img2img_interface, "img2img", "img2img"),
(extras_interface, "Extras", "extras"),
(pnginfo_interface, "PNG Info", "pnginfo"),
(modelmerger_interface, "Checkpoint Merger", "modelmerger"),
(textual_inversion_interface, "Textual inversion", "ti"),
(settings_interface, "Settings", "settings"),
]
with open(os.path.join(script_path, "style.css"), "r", encoding="utf8") as file:
css = file.read()
if os.path.exists(os.path.join(script_path, "user.css")):
with open(os.path.join(script_path, "user.css"), "r", encoding="utf8") as file:
usercss = file.read()
css += usercss
if not cmd_opts.no_progressbar_hiding:
css += css_hide_progressbar
with gr.Blocks(css=css, analytics_enabled=False, title="Stable Diffusion") as demo:
with gr.Row(elem_id="quicksettings"):
for i, k, item in quicksettings_list:
component = create_setting_component(k)
component_dict[k] = component
settings_interface.gradio_ref = demo
with gr.Tabs() as tabs:
for interface, label, ifid in interfaces:
with gr.TabItem(label, id=ifid, elem_id='tab_' + ifid):
interface.render()
if os.path.exists(os.path.join(script_path, "notification.mp3")):
audio_notification = gr.Audio(interactive=False, value=os.path.join(script_path, "notification.mp3"), elem_id="audio_notification", visible=False)
text_settings = gr.Textbox(elem_id="settings_json", value=lambda: opts.dumpjson(), visible=False)
settings_submit.click(
fn=run_settings,
inputs=components,
outputs=[result, text_settings],
)
for i, k, item in quicksettings_list:
component = component_dict[k]
component.change(
fn=lambda value, k=k: run_settings_single(value, key=k),
inputs=[component],
outputs=[component, text_settings],
)
def modelmerger(*args):
try:
results = modules.extras.run_modelmerger(*args)
except Exception as e:
print("Error loading/saving model file:", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
modules.sd_models.list_models() # to remove the potentially missing models from the list
return ["Error loading/saving model file. It doesn't exist or the name contains illegal characters"] + [gr.Dropdown.update(choices=modules.sd_models.checkpoint_tiles()) for _ in range(3)]
return results
modelmerger_merge.click(
fn=modelmerger,
inputs=[
primary_model_name,
secondary_model_name,
interp_method,
interp_amount,
save_as_half,
custom_name,
],
outputs=[
submit_result,
primary_model_name,
secondary_model_name,
component_dict['sd_model_checkpoint'],
]
)
paste_field_names = ['Prompt', 'Negative prompt', 'Steps', 'Face restoration', 'Seed', 'Size-1', 'Size-2']
txt2img_fields = [field for field,name in txt2img_paste_fields if name in paste_field_names]
img2img_fields = [field for field,name in img2img_paste_fields if name in paste_field_names]
send_to_img2img.click(
fn=lambda img, *args: (image_from_url_text(img),*args),
_js="(gallery, ...args) => [extract_image_from_gallery_img2img(gallery), ...args]",
inputs=[txt2img_gallery] + txt2img_fields,
outputs=[init_img] + img2img_fields,
)
send_to_inpaint.click(
fn=lambda x, *args: (image_from_url_text(x), *args),
_js="(gallery, ...args) => [extract_image_from_gallery_inpaint(gallery), ...args]",
inputs=[txt2img_gallery] + txt2img_fields,
outputs=[init_img_with_mask] + img2img_fields,
)
img2img_send_to_img2img.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_img2img",
inputs=[img2img_gallery],
outputs=[init_img],
)
img2img_send_to_inpaint.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_inpaint",
inputs=[img2img_gallery],
outputs=[init_img_with_mask],
)
send_to_extras.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_extras",
inputs=[txt2img_gallery],
outputs=[extras_image],
)
open_txt2img_folder.click(
fn=lambda: open_folder(opts.outdir_samples or opts.outdir_txt2img_samples),
inputs=[],
outputs=[],
)
open_img2img_folder.click(
fn=lambda: open_folder(opts.outdir_samples or opts.outdir_img2img_samples),
inputs=[],
outputs=[],
)
open_extras_folder.click(
fn=lambda: open_folder(opts.outdir_samples or opts.outdir_extras_samples),
inputs=[],
outputs=[],
)
img2img_send_to_extras.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_extras",
inputs=[img2img_gallery],
outputs=[extras_image],
)
modules.generation_parameters_copypaste.connect_paste(pnginfo_send_to_txt2img, txt2img_paste_fields, generation_info, 'switch_to_txt2img')
modules.generation_parameters_copypaste.connect_paste(pnginfo_send_to_img2img, img2img_paste_fields, generation_info, 'switch_to_img2img_img2img')
ui_config_file = cmd_opts.ui_config_file
ui_settings = {}
settings_count = len(ui_settings)
error_loading = False
try:
if os.path.exists(ui_config_file):
with open(ui_config_file, "r", encoding="utf8") as file:
ui_settings = json.load(file)
except Exception:
error_loading = True
print("Error loading settings:", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
def loadsave(path, x):
def apply_field(obj, field, condition=None):
key = path + "/" + field
if getattr(obj,'custom_script_source',None) is not None:
key = 'customscript/' + obj.custom_script_source + '/' + key
if getattr(obj, 'do_not_save_to_config', False):
return
saved_value = ui_settings.get(key, None)
if saved_value is None:
ui_settings[key] = getattr(obj, field)
elif condition is None or condition(saved_value):
setattr(obj, field, saved_value)
if type(x) in [gr.Slider, gr.Radio, gr.Checkbox, gr.Textbox, gr.Number] and x.visible:
apply_field(x, 'visible')
if type(x) == gr.Slider:
apply_field(x, 'value')
apply_field(x, 'minimum')
apply_field(x, 'maximum')
apply_field(x, 'step')
if type(x) == gr.Radio:
apply_field(x, 'value', lambda val: val in x.choices)
if type(x) == gr.Checkbox:
apply_field(x, 'value')
if type(x) == gr.Textbox:
apply_field(x, 'value')
if type(x) == gr.Number:
apply_field(x, 'value')
visit(txt2img_interface, loadsave, "txt2img")
visit(img2img_interface, loadsave, "img2img")
visit(extras_interface, loadsave, "extras")
if not error_loading and (not os.path.exists(ui_config_file) or settings_count != len(ui_settings)):
with open(ui_config_file, "w", encoding="utf8") as file:
json.dump(ui_settings, file, indent=4)
return demo
with open(os.path.join(script_path, "script.js"), "r", encoding="utf8") as jsfile:
javascript = f'<script>{jsfile.read()}</script>'
jsdir = os.path.join(script_path, "javascript")
for filename in sorted(os.listdir(jsdir)):
with open(os.path.join(jsdir, filename), "r", encoding="utf8") as jsfile:
javascript += f"\n<script>{jsfile.read()}</script>"
if 'gradio_routes_templates_response' not in globals():
def template_response(*args, **kwargs):
res = gradio_routes_templates_response(*args, **kwargs)
res.body = res.body.replace(b'</head>', f'{javascript}</head>'.encode("utf8"))
res.init_headers()
return res
gradio_routes_templates_response = gradio.routes.templates.TemplateResponse
gradio.routes.templates.TemplateResponse = template_response
<|code_end|>
|
Progress bar percentage is broken
**Describe the bug**
The progress bar finishes at 65-80%; the per-image processing bar and the total progress bar have different step counts.
I've already seen issue https://github.com/AUTOMATIC1111/stable-diffusion-webui/issues/3093, which I think is related, but it covers a slightly different problem.

Even with "do exactly the amount of steps the slider specifies" setting turned on it still shows strange percentage, for some reason it doubles the amount of steps:

It also doesn't work correctly with SD upscaling:

**To Reproduce**
Steps to reproduce the behavior:
1. Go to img2img tab
2. Select an image
3. Press Generate button
4. See incorrect percentage display on progress bar
P.S. Use SD upscale or turn on the "do exactly the amount of steps the slider specifies" setting to see that the problem still appears.
**Expected behavior**
The total progress bar should have the same number of steps as the sum of the sampling steps for every image being processed.
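For clarity, here is a minimal standalone sketch of how the overall percentage should relate to the number of images and sampling steps, following the same per-job formula used by `check_progress_call` in the UI code above; the function name and example numbers are illustrative only, not part of the webui.

```python
# Hypothetical sketch of the expected progress math; the name expected_progress
# and the standalone form are illustrative, not taken from modules/ui.py.
def expected_progress(job_no, job_count, sampling_step, sampling_steps):
    # fraction contributed by fully finished images
    done = job_no / job_count
    # fraction contributed by the image currently being sampled
    current = (sampling_step / sampling_steps) / job_count if sampling_steps > 0 else 0
    return min(done + current, 1.0)

# e.g. second of four images, 10 of 20 steps done -> 0.375 (37.5%),
# so a finished run should always reach 100%, not stop at 65-80%.
print(expected_progress(job_no=1, job_count=4, sampling_step=10, sampling_steps=20))
```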
**Desktop (please complete the following information):**
- OS: Windows
- Browser: Google Chrome
- Commit revision: f894dd552f68bea27476f1f360ab8e79f3a65b4f
|
modules/sd_samplers.py
<|code_start|>from collections import namedtuple
import numpy as np
import torch
import tqdm
from PIL import Image
import inspect
import k_diffusion.sampling
import ldm.models.diffusion.ddim
import ldm.models.diffusion.plms
from modules import prompt_parser, devices, processing
from modules.shared import opts, cmd_opts, state
import modules.shared as shared
SamplerData = namedtuple('SamplerData', ['name', 'constructor', 'aliases', 'options'])
samplers_k_diffusion = [
('Euler a', 'sample_euler_ancestral', ['k_euler_a'], {}),
('Euler', 'sample_euler', ['k_euler'], {}),
('LMS', 'sample_lms', ['k_lms'], {}),
('Heun', 'sample_heun', ['k_heun'], {}),
('DPM2', 'sample_dpm_2', ['k_dpm_2'], {}),
('DPM2 a', 'sample_dpm_2_ancestral', ['k_dpm_2_a'], {}),
('DPM fast', 'sample_dpm_fast', ['k_dpm_fast'], {}),
('DPM adaptive', 'sample_dpm_adaptive', ['k_dpm_ad'], {}),
('LMS Karras', 'sample_lms', ['k_lms_ka'], {'scheduler': 'karras'}),
('DPM2 Karras', 'sample_dpm_2', ['k_dpm_2_ka'], {'scheduler': 'karras'}),
('DPM2 a Karras', 'sample_dpm_2_ancestral', ['k_dpm_2_a_ka'], {'scheduler': 'karras'}),
]
samplers_data_k_diffusion = [
SamplerData(label, lambda model, funcname=funcname: KDiffusionSampler(funcname, model), aliases, options)
for label, funcname, aliases, options in samplers_k_diffusion
if hasattr(k_diffusion.sampling, funcname)
]
all_samplers = [
*samplers_data_k_diffusion,
SamplerData('DDIM', lambda model: VanillaStableDiffusionSampler(ldm.models.diffusion.ddim.DDIMSampler, model), [], {}),
SamplerData('PLMS', lambda model: VanillaStableDiffusionSampler(ldm.models.diffusion.plms.PLMSSampler, model), [], {}),
]
samplers = []
samplers_for_img2img = []
def create_sampler_with_index(list_of_configs, index, model):
config = list_of_configs[index]
sampler = config.constructor(model)
sampler.config = config
return sampler
def set_samplers():
global samplers, samplers_for_img2img
hidden = set(opts.hide_samplers)
hidden_img2img = set(opts.hide_samplers + ['PLMS'])
samplers = [x for x in all_samplers if x.name not in hidden]
samplers_for_img2img = [x for x in all_samplers if x.name not in hidden_img2img]
set_samplers()
sampler_extra_params = {
'sample_euler': ['s_churn', 's_tmin', 's_tmax', 's_noise'],
'sample_heun': ['s_churn', 's_tmin', 's_tmax', 's_noise'],
'sample_dpm_2': ['s_churn', 's_tmin', 's_tmax', 's_noise'],
}
def setup_img2img_steps(p, steps=None):
if opts.img2img_fix_steps or steps is not None:
steps = int((steps or p.steps) / min(p.denoising_strength, 0.999)) if p.denoising_strength > 0 else 0
t_enc = p.steps - 1
else:
steps = p.steps
t_enc = int(min(p.denoising_strength, 0.999) * steps)
return steps, t_enc
def sample_to_image(samples):
x_sample = processing.decode_first_stage(shared.sd_model, samples[0:1])[0]
x_sample = torch.clamp((x_sample + 1.0) / 2.0, min=0.0, max=1.0)
x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2)
x_sample = x_sample.astype(np.uint8)
return Image.fromarray(x_sample)
def store_latent(decoded):
state.current_latent = decoded
if opts.show_progress_every_n_steps > 0 and shared.state.sampling_step % opts.show_progress_every_n_steps == 0:
if not shared.parallel_processing_allowed:
shared.state.current_image = sample_to_image(decoded)
class InterruptedException(BaseException):
pass
class VanillaStableDiffusionSampler:
def __init__(self, constructor, sd_model):
self.sampler = constructor(sd_model)
self.orig_p_sample_ddim = self.sampler.p_sample_ddim if hasattr(self.sampler, 'p_sample_ddim') else self.sampler.p_sample_plms
self.mask = None
self.nmask = None
self.init_latent = None
self.sampler_noises = None
self.step = 0
self.stop_at = None
self.eta = None
self.default_eta = 0.0
self.config = None
self.last_latent = None
self.conditioning_key = sd_model.model.conditioning_key
def number_of_needed_noises(self, p):
return 0
def launch_sampling(self, steps, func):
state.sampling_steps = steps
state.sampling_step = 0
try:
return func()
except InterruptedException:
return self.last_latent
def p_sample_ddim_hook(self, x_dec, cond, ts, unconditional_conditioning, *args, **kwargs):
if state.interrupted or state.skipped:
raise InterruptedException
if self.stop_at is not None and self.step > self.stop_at:
raise InterruptedException
# Have to unwrap the inpainting conditioning here to perform pre-processing
image_conditioning = None
if isinstance(cond, dict):
image_conditioning = cond["c_concat"][0]
cond = cond["c_crossattn"][0]
unconditional_conditioning = unconditional_conditioning["c_crossattn"][0]
conds_list, tensor = prompt_parser.reconstruct_multicond_batch(cond, self.step)
unconditional_conditioning = prompt_parser.reconstruct_cond_batch(unconditional_conditioning, self.step)
assert all([len(conds) == 1 for conds in conds_list]), 'composition via AND is not supported for DDIM/PLMS samplers'
cond = tensor
# for DDIM, shapes must match, we can't just process cond and uncond independently;
# filling unconditional_conditioning with repeats of the last vector to match length is
# not 100% correct but should work well enough
if unconditional_conditioning.shape[1] < cond.shape[1]:
last_vector = unconditional_conditioning[:, -1:]
last_vector_repeated = last_vector.repeat([1, cond.shape[1] - unconditional_conditioning.shape[1], 1])
unconditional_conditioning = torch.hstack([unconditional_conditioning, last_vector_repeated])
elif unconditional_conditioning.shape[1] > cond.shape[1]:
unconditional_conditioning = unconditional_conditioning[:, :cond.shape[1]]
if self.mask is not None:
img_orig = self.sampler.model.q_sample(self.init_latent, ts)
x_dec = img_orig * self.mask + self.nmask * x_dec
# Wrap the image conditioning back up since the DDIM code can accept the dict directly.
# Note that they need to be lists because it just concatenates them later.
if image_conditioning is not None:
cond = {"c_concat": [image_conditioning], "c_crossattn": [cond]}
unconditional_conditioning = {"c_concat": [image_conditioning], "c_crossattn": [unconditional_conditioning]}
res = self.orig_p_sample_ddim(x_dec, cond, ts, unconditional_conditioning=unconditional_conditioning, *args, **kwargs)
if self.mask is not None:
self.last_latent = self.init_latent * self.mask + self.nmask * res[1]
else:
self.last_latent = res[1]
store_latent(self.last_latent)
self.step += 1
state.sampling_step = self.step
shared.total_tqdm.update()
return res
def initialize(self, p):
self.eta = p.eta if p.eta is not None else opts.eta_ddim
for fieldname in ['p_sample_ddim', 'p_sample_plms']:
if hasattr(self.sampler, fieldname):
setattr(self.sampler, fieldname, self.p_sample_ddim_hook)
self.mask = p.mask if hasattr(p, 'mask') else None
self.nmask = p.nmask if hasattr(p, 'nmask') else None
def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning, steps=None, image_conditioning=None):
steps, t_enc = setup_img2img_steps(p, steps)
self.initialize(p)
# existing code fails with certain step counts, like 9
try:
self.sampler.make_schedule(ddim_num_steps=steps, ddim_eta=self.eta, ddim_discretize=p.ddim_discretize, verbose=False)
except Exception:
self.sampler.make_schedule(ddim_num_steps=steps+1, ddim_eta=self.eta, ddim_discretize=p.ddim_discretize, verbose=False)
x1 = self.sampler.stochastic_encode(x, torch.tensor([t_enc] * int(x.shape[0])).to(shared.device), noise=noise)
self.init_latent = x
self.last_latent = x
self.step = 0
# Wrap the conditioning models with additional image conditioning for inpainting model
if image_conditioning is not None:
conditioning = {"c_concat": [image_conditioning], "c_crossattn": [conditioning]}
unconditional_conditioning = {"c_concat": [image_conditioning], "c_crossattn": [unconditional_conditioning]}
samples = self.launch_sampling(steps, lambda: self.sampler.decode(x1, conditioning, t_enc, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning))
return samples
def sample(self, p, x, conditioning, unconditional_conditioning, steps=None, image_conditioning=None):
self.initialize(p)
self.init_latent = None
self.last_latent = x
self.step = 0
steps = steps or p.steps
# Wrap the conditioning models with additional image conditioning for inpainting model
if image_conditioning is not None:
conditioning = {"c_concat": [image_conditioning], "c_crossattn": [conditioning]}
unconditional_conditioning = {"c_concat": [image_conditioning], "c_crossattn": [unconditional_conditioning]}
# existing code fails with certain step counts, like 9
try:
samples_ddim = self.launch_sampling(steps, lambda: self.sampler.sample(S=steps, conditioning=conditioning, batch_size=int(x.shape[0]), shape=x[0].shape, verbose=False, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning, x_T=x, eta=self.eta)[0])
except Exception:
samples_ddim = self.launch_sampling(steps, lambda: self.sampler.sample(S=steps+1, conditioning=conditioning, batch_size=int(x.shape[0]), shape=x[0].shape, verbose=False, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning, x_T=x, eta=self.eta)[0])
return samples_ddim
class CFGDenoiser(torch.nn.Module):
def __init__(self, model):
super().__init__()
self.inner_model = model
self.mask = None
self.nmask = None
self.init_latent = None
self.step = 0
def forward(self, x, sigma, uncond, cond, cond_scale, image_cond):
if state.interrupted or state.skipped:
raise InterruptedException
conds_list, tensor = prompt_parser.reconstruct_multicond_batch(cond, self.step)
uncond = prompt_parser.reconstruct_cond_batch(uncond, self.step)
batch_size = len(conds_list)
repeats = [len(conds_list[i]) for i in range(batch_size)]
x_in = torch.cat([torch.stack([x[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [x])
image_cond_in = torch.cat([torch.stack([image_cond[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [image_cond])
sigma_in = torch.cat([torch.stack([sigma[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [sigma])
if tensor.shape[1] == uncond.shape[1]:
cond_in = torch.cat([tensor, uncond])
if shared.batch_cond_uncond:
x_out = self.inner_model(x_in, sigma_in, cond={"c_crossattn": [cond_in], "c_concat": [image_cond_in]})
else:
x_out = torch.zeros_like(x_in)
for batch_offset in range(0, x_out.shape[0], batch_size):
a = batch_offset
b = a + batch_size
x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond={"c_crossattn": [cond_in[a:b]], "c_concat": [image_cond_in[a:b]]})
else:
x_out = torch.zeros_like(x_in)
batch_size = batch_size*2 if shared.batch_cond_uncond else batch_size
for batch_offset in range(0, tensor.shape[0], batch_size):
a = batch_offset
b = min(a + batch_size, tensor.shape[0])
x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond={"c_crossattn": [tensor[a:b]], "c_concat": [image_cond_in[a:b]]})
x_out[-uncond.shape[0]:] = self.inner_model(x_in[-uncond.shape[0]:], sigma_in[-uncond.shape[0]:], cond={"c_crossattn": [uncond], "c_concat": [image_cond_in[-uncond.shape[0]:]]})
denoised_uncond = x_out[-uncond.shape[0]:]
denoised = torch.clone(denoised_uncond)
for i, conds in enumerate(conds_list):
for cond_index, weight in conds:
denoised[i] += (x_out[cond_index] - denoised_uncond[i]) * (weight * cond_scale)
if self.mask is not None:
denoised = self.init_latent * self.mask + self.nmask * denoised
self.step += 1
return denoised
class TorchHijack:
def __init__(self, kdiff_sampler):
self.kdiff_sampler = kdiff_sampler
def __getattr__(self, item):
if item == 'randn_like':
return self.kdiff_sampler.randn_like
if hasattr(torch, item):
return getattr(torch, item)
raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, item))
class KDiffusionSampler:
def __init__(self, funcname, sd_model):
self.model_wrap = k_diffusion.external.CompVisDenoiser(sd_model, quantize=shared.opts.enable_quantization)
self.funcname = funcname
self.func = getattr(k_diffusion.sampling, self.funcname)
self.extra_params = sampler_extra_params.get(funcname, [])
self.model_wrap_cfg = CFGDenoiser(self.model_wrap)
self.sampler_noises = None
self.sampler_noise_index = 0
self.stop_at = None
self.eta = None
self.default_eta = 1.0
self.config = None
self.last_latent = None
self.conditioning_key = sd_model.model.conditioning_key
def callback_state(self, d):
step = d['i']
latent = d["denoised"]
store_latent(latent)
self.last_latent = latent
if self.stop_at is not None and step > self.stop_at:
raise InterruptedException
state.sampling_step = step
shared.total_tqdm.update()
def launch_sampling(self, steps, func):
state.sampling_steps = steps
state.sampling_step = 0
try:
return func()
except InterruptedException:
return self.last_latent
def number_of_needed_noises(self, p):
return p.steps
def randn_like(self, x):
noise = self.sampler_noises[self.sampler_noise_index] if self.sampler_noises is not None and self.sampler_noise_index < len(self.sampler_noises) else None
if noise is not None and x.shape == noise.shape:
res = noise
else:
res = torch.randn_like(x)
self.sampler_noise_index += 1
return res
def initialize(self, p):
self.model_wrap_cfg.mask = p.mask if hasattr(p, 'mask') else None
self.model_wrap_cfg.nmask = p.nmask if hasattr(p, 'nmask') else None
self.model_wrap.step = 0
self.sampler_noise_index = 0
self.eta = p.eta or opts.eta_ancestral
if self.sampler_noises is not None:
k_diffusion.sampling.torch = TorchHijack(self)
extra_params_kwargs = {}
for param_name in self.extra_params:
if hasattr(p, param_name) and param_name in inspect.signature(self.func).parameters:
extra_params_kwargs[param_name] = getattr(p, param_name)
if 'eta' in inspect.signature(self.func).parameters:
extra_params_kwargs['eta'] = self.eta
return extra_params_kwargs
def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning, steps=None, image_conditioning=None):
steps, t_enc = setup_img2img_steps(p, steps)
if p.sampler_noise_scheduler_override:
sigmas = p.sampler_noise_scheduler_override(steps)
elif self.config is not None and self.config.options.get('scheduler', None) == 'karras':
sigmas = k_diffusion.sampling.get_sigmas_karras(n=steps, sigma_min=0.1, sigma_max=10, device=shared.device)
else:
sigmas = self.model_wrap.get_sigmas(steps)
sigma_sched = sigmas[steps - t_enc - 1:]
xi = x + noise * sigma_sched[0]
extra_params_kwargs = self.initialize(p)
if 'sigma_min' in inspect.signature(self.func).parameters:
## last sigma is zero which isn't allowed by DPM Fast & Adaptive so taking value before last
extra_params_kwargs['sigma_min'] = sigma_sched[-2]
if 'sigma_max' in inspect.signature(self.func).parameters:
extra_params_kwargs['sigma_max'] = sigma_sched[0]
if 'n' in inspect.signature(self.func).parameters:
extra_params_kwargs['n'] = len(sigma_sched) - 1
if 'sigma_sched' in inspect.signature(self.func).parameters:
extra_params_kwargs['sigma_sched'] = sigma_sched
if 'sigmas' in inspect.signature(self.func).parameters:
extra_params_kwargs['sigmas'] = sigma_sched
self.model_wrap_cfg.init_latent = x
self.last_latent = x
samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, xi, extra_args={
'cond': conditioning,
'image_cond': image_conditioning,
'uncond': unconditional_conditioning,
'cond_scale': p.cfg_scale
}, disable=False, callback=self.callback_state, **extra_params_kwargs))
return samples
def sample(self, p, x, conditioning, unconditional_conditioning, steps=None, image_conditioning = None):
steps = steps or p.steps
if p.sampler_noise_scheduler_override:
sigmas = p.sampler_noise_scheduler_override(steps)
elif self.config is not None and self.config.options.get('scheduler', None) == 'karras':
sigmas = k_diffusion.sampling.get_sigmas_karras(n=steps, sigma_min=0.1, sigma_max=10, device=shared.device)
else:
sigmas = self.model_wrap.get_sigmas(steps)
x = x * sigmas[0]
extra_params_kwargs = self.initialize(p)
if 'sigma_min' in inspect.signature(self.func).parameters:
extra_params_kwargs['sigma_min'] = self.model_wrap.sigmas[0].item()
extra_params_kwargs['sigma_max'] = self.model_wrap.sigmas[-1].item()
if 'n' in inspect.signature(self.func).parameters:
extra_params_kwargs['n'] = steps
else:
extra_params_kwargs['sigmas'] = sigmas
self.last_latent = x
samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args={
'cond': conditioning,
'image_cond': image_conditioning,
'uncond': unconditional_conditioning,
'cond_scale': p.cfg_scale
}, disable=False, callback=self.callback_state, **extra_params_kwargs))
return samples
<|code_end|>
|
modules/sd_samplers.py
<|code_start|>from collections import namedtuple
import numpy as np
import torch
import tqdm
from PIL import Image
import inspect
import k_diffusion.sampling
import ldm.models.diffusion.ddim
import ldm.models.diffusion.plms
from modules import prompt_parser, devices, processing
from modules.shared import opts, cmd_opts, state
import modules.shared as shared
SamplerData = namedtuple('SamplerData', ['name', 'constructor', 'aliases', 'options'])
samplers_k_diffusion = [
('Euler a', 'sample_euler_ancestral', ['k_euler_a'], {}),
('Euler', 'sample_euler', ['k_euler'], {}),
('LMS', 'sample_lms', ['k_lms'], {}),
('Heun', 'sample_heun', ['k_heun'], {}),
('DPM2', 'sample_dpm_2', ['k_dpm_2'], {}),
('DPM2 a', 'sample_dpm_2_ancestral', ['k_dpm_2_a'], {}),
('DPM fast', 'sample_dpm_fast', ['k_dpm_fast'], {}),
('DPM adaptive', 'sample_dpm_adaptive', ['k_dpm_ad'], {}),
('LMS Karras', 'sample_lms', ['k_lms_ka'], {'scheduler': 'karras'}),
('DPM2 Karras', 'sample_dpm_2', ['k_dpm_2_ka'], {'scheduler': 'karras'}),
('DPM2 a Karras', 'sample_dpm_2_ancestral', ['k_dpm_2_a_ka'], {'scheduler': 'karras'}),
]
samplers_data_k_diffusion = [
SamplerData(label, lambda model, funcname=funcname: KDiffusionSampler(funcname, model), aliases, options)
for label, funcname, aliases, options in samplers_k_diffusion
if hasattr(k_diffusion.sampling, funcname)
]
all_samplers = [
*samplers_data_k_diffusion,
SamplerData('DDIM', lambda model: VanillaStableDiffusionSampler(ldm.models.diffusion.ddim.DDIMSampler, model), [], {}),
SamplerData('PLMS', lambda model: VanillaStableDiffusionSampler(ldm.models.diffusion.plms.PLMSSampler, model), [], {}),
]
samplers = []
samplers_for_img2img = []
def create_sampler_with_index(list_of_configs, index, model):
config = list_of_configs[index]
sampler = config.constructor(model)
sampler.config = config
return sampler
def set_samplers():
global samplers, samplers_for_img2img
hidden = set(opts.hide_samplers)
hidden_img2img = set(opts.hide_samplers + ['PLMS'])
samplers = [x for x in all_samplers if x.name not in hidden]
samplers_for_img2img = [x for x in all_samplers if x.name not in hidden_img2img]
set_samplers()
sampler_extra_params = {
'sample_euler': ['s_churn', 's_tmin', 's_tmax', 's_noise'],
'sample_heun': ['s_churn', 's_tmin', 's_tmax', 's_noise'],
'sample_dpm_2': ['s_churn', 's_tmin', 's_tmax', 's_noise'],
}
def setup_img2img_steps(p, steps=None):
if opts.img2img_fix_steps or steps is not None:
steps = int((steps or p.steps) / min(p.denoising_strength, 0.999)) if p.denoising_strength > 0 else 0
t_enc = p.steps - 1
else:
steps = p.steps
t_enc = int(min(p.denoising_strength, 0.999) * steps)
return steps, t_enc
def sample_to_image(samples):
x_sample = processing.decode_first_stage(shared.sd_model, samples[0:1])[0]
x_sample = torch.clamp((x_sample + 1.0) / 2.0, min=0.0, max=1.0)
x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2)
x_sample = x_sample.astype(np.uint8)
return Image.fromarray(x_sample)
def store_latent(decoded):
state.current_latent = decoded
if opts.show_progress_every_n_steps > 0 and shared.state.sampling_step % opts.show_progress_every_n_steps == 0:
if not shared.parallel_processing_allowed:
shared.state.current_image = sample_to_image(decoded)
class InterruptedException(BaseException):
pass
class VanillaStableDiffusionSampler:
def __init__(self, constructor, sd_model):
self.sampler = constructor(sd_model)
self.orig_p_sample_ddim = self.sampler.p_sample_ddim if hasattr(self.sampler, 'p_sample_ddim') else self.sampler.p_sample_plms
self.mask = None
self.nmask = None
self.init_latent = None
self.sampler_noises = None
self.step = 0
self.stop_at = None
self.eta = None
self.default_eta = 0.0
self.config = None
self.last_latent = None
self.conditioning_key = sd_model.model.conditioning_key
def number_of_needed_noises(self, p):
return 0
def launch_sampling(self, steps, func):
state.sampling_steps = steps
state.sampling_step = 0
try:
return func()
except InterruptedException:
return self.last_latent
def p_sample_ddim_hook(self, x_dec, cond, ts, unconditional_conditioning, *args, **kwargs):
if state.interrupted or state.skipped:
raise InterruptedException
if self.stop_at is not None and self.step > self.stop_at:
raise InterruptedException
# Have to unwrap the inpainting conditioning here to perform pre-processing
image_conditioning = None
if isinstance(cond, dict):
image_conditioning = cond["c_concat"][0]
cond = cond["c_crossattn"][0]
unconditional_conditioning = unconditional_conditioning["c_crossattn"][0]
conds_list, tensor = prompt_parser.reconstruct_multicond_batch(cond, self.step)
unconditional_conditioning = prompt_parser.reconstruct_cond_batch(unconditional_conditioning, self.step)
assert all([len(conds) == 1 for conds in conds_list]), 'composition via AND is not supported for DDIM/PLMS samplers'
cond = tensor
# for DDIM, shapes must match, we can't just process cond and uncond independently;
# filling unconditional_conditioning with repeats of the last vector to match length is
# not 100% correct but should work well enough
if unconditional_conditioning.shape[1] < cond.shape[1]:
last_vector = unconditional_conditioning[:, -1:]
last_vector_repeated = last_vector.repeat([1, cond.shape[1] - unconditional_conditioning.shape[1], 1])
unconditional_conditioning = torch.hstack([unconditional_conditioning, last_vector_repeated])
elif unconditional_conditioning.shape[1] > cond.shape[1]:
unconditional_conditioning = unconditional_conditioning[:, :cond.shape[1]]
if self.mask is not None:
img_orig = self.sampler.model.q_sample(self.init_latent, ts)
x_dec = img_orig * self.mask + self.nmask * x_dec
# Wrap the image conditioning back up since the DDIM code can accept the dict directly.
# Note that they need to be lists because it just concatenates them later.
if image_conditioning is not None:
cond = {"c_concat": [image_conditioning], "c_crossattn": [cond]}
unconditional_conditioning = {"c_concat": [image_conditioning], "c_crossattn": [unconditional_conditioning]}
res = self.orig_p_sample_ddim(x_dec, cond, ts, unconditional_conditioning=unconditional_conditioning, *args, **kwargs)
if self.mask is not None:
self.last_latent = self.init_latent * self.mask + self.nmask * res[1]
else:
self.last_latent = res[1]
store_latent(self.last_latent)
self.step += 1
state.sampling_step = self.step
shared.total_tqdm.update()
return res
def initialize(self, p):
self.eta = p.eta if p.eta is not None else opts.eta_ddim
for fieldname in ['p_sample_ddim', 'p_sample_plms']:
if hasattr(self.sampler, fieldname):
setattr(self.sampler, fieldname, self.p_sample_ddim_hook)
self.mask = p.mask if hasattr(p, 'mask') else None
self.nmask = p.nmask if hasattr(p, 'nmask') else None
def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning, steps=None, image_conditioning=None):
steps, t_enc = setup_img2img_steps(p, steps)
self.initialize(p)
# existing code fails with certain step counts, like 9
try:
self.sampler.make_schedule(ddim_num_steps=steps, ddim_eta=self.eta, ddim_discretize=p.ddim_discretize, verbose=False)
except Exception:
self.sampler.make_schedule(ddim_num_steps=steps+1, ddim_eta=self.eta, ddim_discretize=p.ddim_discretize, verbose=False)
x1 = self.sampler.stochastic_encode(x, torch.tensor([t_enc] * int(x.shape[0])).to(shared.device), noise=noise)
self.init_latent = x
self.last_latent = x
self.step = 0
# Wrap the conditioning models with additional image conditioning for inpainting model
if image_conditioning is not None:
conditioning = {"c_concat": [image_conditioning], "c_crossattn": [conditioning]}
unconditional_conditioning = {"c_concat": [image_conditioning], "c_crossattn": [unconditional_conditioning]}
samples = self.launch_sampling(t_enc + 1, lambda: self.sampler.decode(x1, conditioning, t_enc, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning))
return samples
def sample(self, p, x, conditioning, unconditional_conditioning, steps=None, image_conditioning=None):
self.initialize(p)
self.init_latent = None
self.last_latent = x
self.step = 0
steps = steps or p.steps
# Wrap the conditioning models with additional image conditioning for inpainting model
if image_conditioning is not None:
conditioning = {"c_concat": [image_conditioning], "c_crossattn": [conditioning]}
unconditional_conditioning = {"c_concat": [image_conditioning], "c_crossattn": [unconditional_conditioning]}
# existing code fails with certain step counts, like 9
try:
samples_ddim = self.launch_sampling(steps, lambda: self.sampler.sample(S=steps, conditioning=conditioning, batch_size=int(x.shape[0]), shape=x[0].shape, verbose=False, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning, x_T=x, eta=self.eta)[0])
except Exception:
samples_ddim = self.launch_sampling(steps, lambda: self.sampler.sample(S=steps+1, conditioning=conditioning, batch_size=int(x.shape[0]), shape=x[0].shape, verbose=False, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning, x_T=x, eta=self.eta)[0])
return samples_ddim
class CFGDenoiser(torch.nn.Module):
def __init__(self, model):
super().__init__()
self.inner_model = model
self.mask = None
self.nmask = None
self.init_latent = None
self.step = 0
def forward(self, x, sigma, uncond, cond, cond_scale, image_cond):
if state.interrupted or state.skipped:
raise InterruptedException
conds_list, tensor = prompt_parser.reconstruct_multicond_batch(cond, self.step)
uncond = prompt_parser.reconstruct_cond_batch(uncond, self.step)
batch_size = len(conds_list)
repeats = [len(conds_list[i]) for i in range(batch_size)]
x_in = torch.cat([torch.stack([x[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [x])
image_cond_in = torch.cat([torch.stack([image_cond[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [image_cond])
sigma_in = torch.cat([torch.stack([sigma[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [sigma])
if tensor.shape[1] == uncond.shape[1]:
cond_in = torch.cat([tensor, uncond])
if shared.batch_cond_uncond:
x_out = self.inner_model(x_in, sigma_in, cond={"c_crossattn": [cond_in], "c_concat": [image_cond_in]})
else:
x_out = torch.zeros_like(x_in)
for batch_offset in range(0, x_out.shape[0], batch_size):
a = batch_offset
b = a + batch_size
x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond={"c_crossattn": [cond_in[a:b]], "c_concat": [image_cond_in[a:b]]})
else:
x_out = torch.zeros_like(x_in)
batch_size = batch_size*2 if shared.batch_cond_uncond else batch_size
for batch_offset in range(0, tensor.shape[0], batch_size):
a = batch_offset
b = min(a + batch_size, tensor.shape[0])
x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond={"c_crossattn": [tensor[a:b]], "c_concat": [image_cond_in[a:b]]})
x_out[-uncond.shape[0]:] = self.inner_model(x_in[-uncond.shape[0]:], sigma_in[-uncond.shape[0]:], cond={"c_crossattn": [uncond], "c_concat": [image_cond_in[-uncond.shape[0]:]]})
denoised_uncond = x_out[-uncond.shape[0]:]
denoised = torch.clone(denoised_uncond)
for i, conds in enumerate(conds_list):
for cond_index, weight in conds:
denoised[i] += (x_out[cond_index] - denoised_uncond[i]) * (weight * cond_scale)
if self.mask is not None:
denoised = self.init_latent * self.mask + self.nmask * denoised
self.step += 1
return denoised
class TorchHijack:
def __init__(self, kdiff_sampler):
self.kdiff_sampler = kdiff_sampler
def __getattr__(self, item):
if item == 'randn_like':
return self.kdiff_sampler.randn_like
if hasattr(torch, item):
return getattr(torch, item)
raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, item))
class KDiffusionSampler:
def __init__(self, funcname, sd_model):
self.model_wrap = k_diffusion.external.CompVisDenoiser(sd_model, quantize=shared.opts.enable_quantization)
self.funcname = funcname
self.func = getattr(k_diffusion.sampling, self.funcname)
self.extra_params = sampler_extra_params.get(funcname, [])
self.model_wrap_cfg = CFGDenoiser(self.model_wrap)
self.sampler_noises = None
self.sampler_noise_index = 0
self.stop_at = None
self.eta = None
self.default_eta = 1.0
self.config = None
self.last_latent = None
self.conditioning_key = sd_model.model.conditioning_key
def callback_state(self, d):
step = d['i']
latent = d["denoised"]
store_latent(latent)
self.last_latent = latent
if self.stop_at is not None and step > self.stop_at:
raise InterruptedException
state.sampling_step = step
shared.total_tqdm.update()
def launch_sampling(self, steps, func):
state.sampling_steps = steps
state.sampling_step = 0
try:
return func()
except InterruptedException:
return self.last_latent
def number_of_needed_noises(self, p):
return p.steps
def randn_like(self, x):
noise = self.sampler_noises[self.sampler_noise_index] if self.sampler_noises is not None and self.sampler_noise_index < len(self.sampler_noises) else None
if noise is not None and x.shape == noise.shape:
res = noise
else:
res = torch.randn_like(x)
self.sampler_noise_index += 1
return res
def initialize(self, p):
self.model_wrap_cfg.mask = p.mask if hasattr(p, 'mask') else None
self.model_wrap_cfg.nmask = p.nmask if hasattr(p, 'nmask') else None
self.model_wrap.step = 0
self.sampler_noise_index = 0
self.eta = p.eta or opts.eta_ancestral
if self.sampler_noises is not None:
k_diffusion.sampling.torch = TorchHijack(self)
extra_params_kwargs = {}
for param_name in self.extra_params:
if hasattr(p, param_name) and param_name in inspect.signature(self.func).parameters:
extra_params_kwargs[param_name] = getattr(p, param_name)
if 'eta' in inspect.signature(self.func).parameters:
extra_params_kwargs['eta'] = self.eta
return extra_params_kwargs
def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning, steps=None, image_conditioning=None):
steps, t_enc = setup_img2img_steps(p, steps)
if p.sampler_noise_scheduler_override:
sigmas = p.sampler_noise_scheduler_override(steps)
elif self.config is not None and self.config.options.get('scheduler', None) == 'karras':
sigmas = k_diffusion.sampling.get_sigmas_karras(n=steps, sigma_min=0.1, sigma_max=10, device=shared.device)
else:
sigmas = self.model_wrap.get_sigmas(steps)
sigma_sched = sigmas[steps - t_enc - 1:]
xi = x + noise * sigma_sched[0]
extra_params_kwargs = self.initialize(p)
if 'sigma_min' in inspect.signature(self.func).parameters:
## last sigma is zero which isn't allowed by DPM Fast & Adaptive so taking value before last
extra_params_kwargs['sigma_min'] = sigma_sched[-2]
if 'sigma_max' in inspect.signature(self.func).parameters:
extra_params_kwargs['sigma_max'] = sigma_sched[0]
if 'n' in inspect.signature(self.func).parameters:
extra_params_kwargs['n'] = len(sigma_sched) - 1
if 'sigma_sched' in inspect.signature(self.func).parameters:
extra_params_kwargs['sigma_sched'] = sigma_sched
if 'sigmas' in inspect.signature(self.func).parameters:
extra_params_kwargs['sigmas'] = sigma_sched
self.model_wrap_cfg.init_latent = x
self.last_latent = x
samples = self.launch_sampling(t_enc + 1, lambda: self.func(self.model_wrap_cfg, xi, extra_args={
'cond': conditioning,
'image_cond': image_conditioning,
'uncond': unconditional_conditioning,
'cond_scale': p.cfg_scale
}, disable=False, callback=self.callback_state, **extra_params_kwargs))
return samples
def sample(self, p, x, conditioning, unconditional_conditioning, steps=None, image_conditioning = None):
steps = steps or p.steps
if p.sampler_noise_scheduler_override:
sigmas = p.sampler_noise_scheduler_override(steps)
elif self.config is not None and self.config.options.get('scheduler', None) == 'karras':
sigmas = k_diffusion.sampling.get_sigmas_karras(n=steps, sigma_min=0.1, sigma_max=10, device=shared.device)
else:
sigmas = self.model_wrap.get_sigmas(steps)
x = x * sigmas[0]
extra_params_kwargs = self.initialize(p)
if 'sigma_min' in inspect.signature(self.func).parameters:
extra_params_kwargs['sigma_min'] = self.model_wrap.sigmas[0].item()
extra_params_kwargs['sigma_max'] = self.model_wrap.sigmas[-1].item()
if 'n' in inspect.signature(self.func).parameters:
extra_params_kwargs['n'] = steps
else:
extra_params_kwargs['sigmas'] = sigmas
self.last_latent = x
samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args={
'cond': conditioning,
'image_cond': image_conditioning,
'uncond': unconditional_conditioning,
'cond_scale': p.cfg_scale
}, disable=False, callback=self.callback_state, **extra_params_kwargs))
return samples
<|code_end|>
|
[Bug]: TypeError: 'NoneType' object is not subscriptable when using img2img alternative test
### Is there an existing issue for this?
- [X] I have searched the existing issues and checked the recent builds/commits
### What happened?
The web UI fails to generate an image when using the img2img alternative test, ever since the Runway inpainting support update.
Unrelated to this, it does not support the unlimited-tokens feature either.
### Steps to reproduce the problem
1. Go to img2img
2. Choose img2img alternative test
3. Upload image and write prompt, parameters, settings, etc.
4. Generate.
### What should have happened?
An image should generate.
### Commit where the problem happens
bf30673f5132c8f28357b31224c54331e788d3e7
### What platforms do you use to access UI ?
Windows
### What browsers do you use to access the UI ?
Google Chrome
### Command Line Arguments
```Shell
--deepdanbooru --xformers --gradio-img2img-tool color-sketch
```
### Additional information, context and logs
Traceback (most recent call last):
File "G:\stable-webui\modules\ui.py", line 212, in f
res = list(func(*args, **kwargs))
File "G:\stable-webui\webui.py", line 63, in f
res = func(*args, **kwargs)
File "G:\stable-webui\modules\img2img.py", line 124, in img2img
processed = modules.scripts.scripts_img2img.run(p, *args)
File "G:\stable-webui\modules\scripts.py", line 173, in run
processed = script.run(p, *script_args)
File "G:\stable-webui\scripts\img2imgalt.py", line 208, in run
processed = processing.process_images(p)
File "G:\stable-webui\modules\processing.py", line 411, in process_images
samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength)
File "G:\stable-webui\scripts\img2imgalt.py", line 197, in sample_extra
return sampler.sample_img2img(p, p.init_latent, noise_dt, conditioning, unconditional_conditioning)
File "G:\stable-webui\modules\sd_samplers.py", line 423, in sample_img2img
samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, xi, extra_args={
File "G:\stable-webui\modules\sd_samplers.py", line 356, in launch_sampling
return func()
File "G:\stable-webui\modules\sd_samplers.py", line 423, in <lambda>
samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, xi, extra_args={
File "G:\stable-webui\venv\lib\site-packages\torch\autograd\grad_mode.py", line 27, in decorate_context
return func(*args, **kwargs)
File "G:\stable-webui\repositories\k-diffusion\k_diffusion\sampling.py", line 64, in sample_euler
denoised = model(x, sigma_hat * s_in, **extra_args)
File "G:\stable-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1130, in _call_impl
return forward_call(*input, **kwargs)
File "G:\stable-webui\modules\sd_samplers.py", line 269, in forward
image_cond_in = torch.cat([torch.stack([image_cond[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [image_cond])
File "G:\stable-webui\modules\sd_samplers.py", line 269, in <listcomp>
image_cond_in = torch.cat([torch.stack([image_cond[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [image_cond])
File "G:\stable-webui\modules\sd_samplers.py", line 269, in <listcomp>
image_cond_in = torch.cat([torch.stack([image_cond[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [image_cond])
TypeError: 'NoneType' object is not subscriptable
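The last frames show the root cause: `CFGDenoiser.forward` indexes `image_cond` per batch item, but the script called `sample_img2img` without any image conditioning, so `image_cond` arrived as `None`. The snippet below is a minimal, hypothetical reproduction added for illustration only; the helper name `build_image_cond_in` and the dummy tensor shape are assumptions, not repository code. The actual change in the updated script is to pass `image_conditioning=p.image_conditioning` through to the sampler.
```python
import torch
def build_image_cond_in(image_cond, repeats):
    # Mirrors the failing list comprehension in CFGDenoiser.forward.
    return torch.cat([torch.stack([image_cond[i] for _ in range(n)])
                      for i, n in enumerate(repeats)] + [image_cond])
repeats = [1]  # one sub-prompt in a batch of one
try:
    build_image_cond_in(None, repeats)  # what the old call effectively produced
except TypeError as err:
    print(err)  # 'NoneType' object is not subscriptable
dummy_image_cond = torch.zeros(1, 5, 1, 1)  # hypothetical conditioning tensor for illustration
print(build_image_cond_in(dummy_image_cond, repeats).shape)  # torch.Size([2, 5, 1, 1])
```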
|
scripts/img2imgalt.py
<|code_start|>from collections import namedtuple
import numpy as np
from tqdm import trange
import modules.scripts as scripts
import gradio as gr
from modules import processing, shared, sd_samplers, prompt_parser
from modules.processing import Processed
from modules.shared import opts, cmd_opts, state
import torch
import k_diffusion as K
from PIL import Image
from torch import autocast
from einops import rearrange, repeat
def find_noise_for_image(p, cond, uncond, cfg_scale, steps):
x = p.init_latent
s_in = x.new_ones([x.shape[0]])
dnw = K.external.CompVisDenoiser(shared.sd_model)
sigmas = dnw.get_sigmas(steps).flip(0)
shared.state.sampling_steps = steps
for i in trange(1, len(sigmas)):
shared.state.sampling_step += 1
x_in = torch.cat([x] * 2)
sigma_in = torch.cat([sigmas[i] * s_in] * 2)
cond_in = torch.cat([uncond, cond])
c_out, c_in = [K.utils.append_dims(k, x_in.ndim) for k in dnw.get_scalings(sigma_in)]
t = dnw.sigma_to_t(sigma_in)
eps = shared.sd_model.apply_model(x_in * c_in, t, cond=cond_in)
denoised_uncond, denoised_cond = (x_in + eps * c_out).chunk(2)
denoised = denoised_uncond + (denoised_cond - denoised_uncond) * cfg_scale
d = (x - denoised) / sigmas[i]
dt = sigmas[i] - sigmas[i - 1]
x = x + d * dt
sd_samplers.store_latent(x)
# This shouldn't be necessary, but solved some VRAM issues
del x_in, sigma_in, cond_in, c_out, c_in, t,
del eps, denoised_uncond, denoised_cond, denoised, d, dt
shared.state.nextjob()
return x / x.std()
Cached = namedtuple("Cached", ["noise", "cfg_scale", "steps", "latent", "original_prompt", "original_negative_prompt", "sigma_adjustment"])
# Based on changes suggested by briansemrau in https://github.com/AUTOMATIC1111/stable-diffusion-webui/issues/736
def find_noise_for_image_sigma_adjustment(p, cond, uncond, cfg_scale, steps):
x = p.init_latent
s_in = x.new_ones([x.shape[0]])
dnw = K.external.CompVisDenoiser(shared.sd_model)
sigmas = dnw.get_sigmas(steps).flip(0)
shared.state.sampling_steps = steps
for i in trange(1, len(sigmas)):
shared.state.sampling_step += 1
x_in = torch.cat([x] * 2)
sigma_in = torch.cat([sigmas[i - 1] * s_in] * 2)
cond_in = torch.cat([uncond, cond])
c_out, c_in = [K.utils.append_dims(k, x_in.ndim) for k in dnw.get_scalings(sigma_in)]
if i == 1:
t = dnw.sigma_to_t(torch.cat([sigmas[i] * s_in] * 2))
else:
t = dnw.sigma_to_t(sigma_in)
eps = shared.sd_model.apply_model(x_in * c_in, t, cond=cond_in)
denoised_uncond, denoised_cond = (x_in + eps * c_out).chunk(2)
denoised = denoised_uncond + (denoised_cond - denoised_uncond) * cfg_scale
if i == 1:
d = (x - denoised) / (2 * sigmas[i])
else:
d = (x - denoised) / sigmas[i - 1]
dt = sigmas[i] - sigmas[i - 1]
x = x + d * dt
sd_samplers.store_latent(x)
# This shouldn't be necessary, but solved some VRAM issues
del x_in, sigma_in, cond_in, c_out, c_in, t,
del eps, denoised_uncond, denoised_cond, denoised, d, dt
shared.state.nextjob()
return x / sigmas[-1]
class Script(scripts.Script):
def __init__(self):
self.cache = None
def title(self):
return "img2img alternative test"
def show(self, is_img2img):
return is_img2img
def ui(self, is_img2img):
info = gr.Markdown('''
* `CFG Scale` should be 2 or lower.
''')
override_sampler = gr.Checkbox(label="Override `Sampling method` to Euler?(this method is built for it)", value=True)
override_prompt = gr.Checkbox(label="Override `prompt` to the same value as `original prompt`?(and `negative prompt`)", value=True)
original_prompt = gr.Textbox(label="Original prompt", lines=1)
original_negative_prompt = gr.Textbox(label="Original negative prompt", lines=1)
override_steps = gr.Checkbox(label="Override `Sampling Steps` to the same value as `Decode steps`?", value=True)
st = gr.Slider(label="Decode steps", minimum=1, maximum=150, step=1, value=50)
override_strength = gr.Checkbox(label="Override `Denoising strength` to 1?", value=True)
cfg = gr.Slider(label="Decode CFG scale", minimum=0.0, maximum=15.0, step=0.1, value=1.0)
randomness = gr.Slider(label="Randomness", minimum=0.0, maximum=1.0, step=0.01, value=0.0)
sigma_adjustment = gr.Checkbox(label="Sigma adjustment for finding noise for image", value=False)
return [
info,
override_sampler,
override_prompt, original_prompt, original_negative_prompt,
override_steps, st,
override_strength,
cfg, randomness, sigma_adjustment,
]
def run(self, p, _, override_sampler, override_prompt, original_prompt, original_negative_prompt, override_steps, st, override_strength, cfg, randomness, sigma_adjustment):
# Override
if override_sampler:
p.sampler_index = [sampler.name for sampler in sd_samplers.samplers].index("Euler")
if override_prompt:
p.prompt = original_prompt
p.negative_prompt = original_negative_prompt
if override_steps:
p.steps = st
if override_strength:
p.denoising_strength = 1.0
def sample_extra(conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength):
lat = (p.init_latent.cpu().numpy() * 10).astype(int)
same_params = self.cache is not None and self.cache.cfg_scale == cfg and self.cache.steps == st \
and self.cache.original_prompt == original_prompt \
and self.cache.original_negative_prompt == original_negative_prompt \
and self.cache.sigma_adjustment == sigma_adjustment
same_everything = same_params and self.cache.latent.shape == lat.shape and np.abs(self.cache.latent-lat).sum() < 100
if same_everything:
rec_noise = self.cache.noise
else:
shared.state.job_count += 1
cond = p.sd_model.get_learned_conditioning(p.batch_size * [original_prompt])
uncond = p.sd_model.get_learned_conditioning(p.batch_size * [original_negative_prompt])
if sigma_adjustment:
rec_noise = find_noise_for_image_sigma_adjustment(p, cond, uncond, cfg, st)
else:
rec_noise = find_noise_for_image(p, cond, uncond, cfg, st)
self.cache = Cached(rec_noise, cfg, st, lat, original_prompt, original_negative_prompt, sigma_adjustment)
rand_noise = processing.create_random_tensors(p.init_latent.shape[1:], seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength, seed_resize_from_h=p.seed_resize_from_h, seed_resize_from_w=p.seed_resize_from_w, p=p)
combined_noise = ((1 - randomness) * rec_noise + randomness * rand_noise) / ((randomness**2 + (1-randomness)**2) ** 0.5)
sampler = sd_samplers.create_sampler_with_index(sd_samplers.samplers, p.sampler_index, p.sd_model)
sigmas = sampler.model_wrap.get_sigmas(p.steps)
noise_dt = combined_noise - (p.init_latent / sigmas[0])
p.seed = p.seed + 1
return sampler.sample_img2img(p, p.init_latent, noise_dt, conditioning, unconditional_conditioning)
p.sample = sample_extra
p.extra_generation_params["Decode prompt"] = original_prompt
p.extra_generation_params["Decode negative prompt"] = original_negative_prompt
p.extra_generation_params["Decode CFG scale"] = cfg
p.extra_generation_params["Decode steps"] = st
p.extra_generation_params["Randomness"] = randomness
p.extra_generation_params["Sigma Adjustment"] = sigma_adjustment
processed = processing.process_images(p)
return processed
<|code_end|>
|
scripts/img2imgalt.py
<|code_start|>from collections import namedtuple
import numpy as np
from tqdm import trange
import modules.scripts as scripts
import gradio as gr
from modules import processing, shared, sd_samplers, prompt_parser
from modules.processing import Processed
from modules.shared import opts, cmd_opts, state
import torch
import k_diffusion as K
from PIL import Image
from torch import autocast
from einops import rearrange, repeat
def find_noise_for_image(p, cond, uncond, cfg_scale, steps):
x = p.init_latent
s_in = x.new_ones([x.shape[0]])
dnw = K.external.CompVisDenoiser(shared.sd_model)
sigmas = dnw.get_sigmas(steps).flip(0)
shared.state.sampling_steps = steps
for i in trange(1, len(sigmas)):
shared.state.sampling_step += 1
x_in = torch.cat([x] * 2)
sigma_in = torch.cat([sigmas[i] * s_in] * 2)
cond_in = torch.cat([uncond, cond])
image_conditioning = torch.cat([p.image_conditioning] * 2)
cond_in = {"c_concat": [image_conditioning], "c_crossattn": [cond_in]}
c_out, c_in = [K.utils.append_dims(k, x_in.ndim) for k in dnw.get_scalings(sigma_in)]
t = dnw.sigma_to_t(sigma_in)
eps = shared.sd_model.apply_model(x_in * c_in, t, cond=cond_in)
denoised_uncond, denoised_cond = (x_in + eps * c_out).chunk(2)
denoised = denoised_uncond + (denoised_cond - denoised_uncond) * cfg_scale
d = (x - denoised) / sigmas[i]
dt = sigmas[i] - sigmas[i - 1]
x = x + d * dt
sd_samplers.store_latent(x)
# This shouldn't be necessary, but solved some VRAM issues
del x_in, sigma_in, cond_in, c_out, c_in, t,
del eps, denoised_uncond, denoised_cond, denoised, d, dt
shared.state.nextjob()
return x / x.std()
Cached = namedtuple("Cached", ["noise", "cfg_scale", "steps", "latent", "original_prompt", "original_negative_prompt", "sigma_adjustment"])
# Based on changes suggested by briansemrau in https://github.com/AUTOMATIC1111/stable-diffusion-webui/issues/736
def find_noise_for_image_sigma_adjustment(p, cond, uncond, cfg_scale, steps):
x = p.init_latent
s_in = x.new_ones([x.shape[0]])
dnw = K.external.CompVisDenoiser(shared.sd_model)
sigmas = dnw.get_sigmas(steps).flip(0)
shared.state.sampling_steps = steps
for i in trange(1, len(sigmas)):
shared.state.sampling_step += 1
x_in = torch.cat([x] * 2)
sigma_in = torch.cat([sigmas[i - 1] * s_in] * 2)
cond_in = torch.cat([uncond, cond])
image_conditioning = torch.cat([p.image_conditioning] * 2)
cond_in = {"c_concat": [image_conditioning], "c_crossattn": [cond_in]}
c_out, c_in = [K.utils.append_dims(k, x_in.ndim) for k in dnw.get_scalings(sigma_in)]
if i == 1:
t = dnw.sigma_to_t(torch.cat([sigmas[i] * s_in] * 2))
else:
t = dnw.sigma_to_t(sigma_in)
eps = shared.sd_model.apply_model(x_in * c_in, t, cond=cond_in)
denoised_uncond, denoised_cond = (x_in + eps * c_out).chunk(2)
denoised = denoised_uncond + (denoised_cond - denoised_uncond) * cfg_scale
if i == 1:
d = (x - denoised) / (2 * sigmas[i])
else:
d = (x - denoised) / sigmas[i - 1]
dt = sigmas[i] - sigmas[i - 1]
x = x + d * dt
sd_samplers.store_latent(x)
# This shouldn't be necessary, but solved some VRAM issues
del x_in, sigma_in, cond_in, c_out, c_in, t,
del eps, denoised_uncond, denoised_cond, denoised, d, dt
shared.state.nextjob()
return x / sigmas[-1]
class Script(scripts.Script):
def __init__(self):
self.cache = None
def title(self):
return "img2img alternative test"
def show(self, is_img2img):
return is_img2img
def ui(self, is_img2img):
info = gr.Markdown('''
* `CFG Scale` should be 2 or lower.
''')
override_sampler = gr.Checkbox(label="Override `Sampling method` to Euler?(this method is built for it)", value=True)
override_prompt = gr.Checkbox(label="Override `prompt` to the same value as `original prompt`?(and `negative prompt`)", value=True)
original_prompt = gr.Textbox(label="Original prompt", lines=1)
original_negative_prompt = gr.Textbox(label="Original negative prompt", lines=1)
override_steps = gr.Checkbox(label="Override `Sampling Steps` to the same value as `Decode steps`?", value=True)
st = gr.Slider(label="Decode steps", minimum=1, maximum=150, step=1, value=50)
override_strength = gr.Checkbox(label="Override `Denoising strength` to 1?", value=True)
cfg = gr.Slider(label="Decode CFG scale", minimum=0.0, maximum=15.0, step=0.1, value=1.0)
randomness = gr.Slider(label="Randomness", minimum=0.0, maximum=1.0, step=0.01, value=0.0)
sigma_adjustment = gr.Checkbox(label="Sigma adjustment for finding noise for image", value=False)
return [
info,
override_sampler,
override_prompt, original_prompt, original_negative_prompt,
override_steps, st,
override_strength,
cfg, randomness, sigma_adjustment,
]
def run(self, p, _, override_sampler, override_prompt, original_prompt, original_negative_prompt, override_steps, st, override_strength, cfg, randomness, sigma_adjustment):
# Override
if override_sampler:
p.sampler_index = [sampler.name for sampler in sd_samplers.samplers].index("Euler")
if override_prompt:
p.prompt = original_prompt
p.negative_prompt = original_negative_prompt
if override_steps:
p.steps = st
if override_strength:
p.denoising_strength = 1.0
def sample_extra(conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength):
lat = (p.init_latent.cpu().numpy() * 10).astype(int)
same_params = self.cache is not None and self.cache.cfg_scale == cfg and self.cache.steps == st \
and self.cache.original_prompt == original_prompt \
and self.cache.original_negative_prompt == original_negative_prompt \
and self.cache.sigma_adjustment == sigma_adjustment
same_everything = same_params and self.cache.latent.shape == lat.shape and np.abs(self.cache.latent-lat).sum() < 100
if same_everything:
rec_noise = self.cache.noise
else:
shared.state.job_count += 1
cond = p.sd_model.get_learned_conditioning(p.batch_size * [original_prompt])
uncond = p.sd_model.get_learned_conditioning(p.batch_size * [original_negative_prompt])
if sigma_adjustment:
rec_noise = find_noise_for_image_sigma_adjustment(p, cond, uncond, cfg, st)
else:
rec_noise = find_noise_for_image(p, cond, uncond, cfg, st)
self.cache = Cached(rec_noise, cfg, st, lat, original_prompt, original_negative_prompt, sigma_adjustment)
rand_noise = processing.create_random_tensors(p.init_latent.shape[1:], seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength, seed_resize_from_h=p.seed_resize_from_h, seed_resize_from_w=p.seed_resize_from_w, p=p)
combined_noise = ((1 - randomness) * rec_noise + randomness * rand_noise) / ((randomness**2 + (1-randomness)**2) ** 0.5)
sampler = sd_samplers.create_sampler_with_index(sd_samplers.samplers, p.sampler_index, p.sd_model)
sigmas = sampler.model_wrap.get_sigmas(p.steps)
noise_dt = combined_noise - (p.init_latent / sigmas[0])
p.seed = p.seed + 1
return sampler.sample_img2img(p, p.init_latent, noise_dt, conditioning, unconditional_conditioning, image_conditioning=p.image_conditioning)
p.sample = sample_extra
p.extra_generation_params["Decode prompt"] = original_prompt
p.extra_generation_params["Decode negative prompt"] = original_negative_prompt
p.extra_generation_params["Decode CFG scale"] = cfg
p.extra_generation_params["Decode steps"] = st
p.extra_generation_params["Randomness"] = randomness
p.extra_generation_params["Sigma Adjustment"] = sigma_adjustment
processed = processing.process_images(p)
return processed
<|code_end|>
|
DeepDanbooru breaks CUDA for image generation after it has run.
**Describe the bug**
After running DeepDanbooru on an image, new images can no longer be generated due to a CUDA error.
**To Reproduce**
Steps to reproduce the behavior:
1. Enable deepdanbooru
2. Generate or upload an image to img2img
3. Click "Interrogate DeepBooru"
4. After it finishes, try to generate any image with either generator
5. Generation will halt and a CUDA error will be shown
Error:
```
Error completing request
Arguments: (0, 'Test', 'Test', 'None', 'None', <PIL.Image.Image image mode=RGB size=512x512 at 0x7F483FF50400>, None, None, None, 0, 60, 0, 4, 1, False, False, 1, 1, 7, 0.75, -1.0, -1.0, 0, 0, 0, False, 512, 512, 0, False, 32, 0, '', '', 0, 4.0, 1, 1, 0, 0, 0.0, 4.0, 0.1, 0.1, 1, True, False, False, 0, False, '', 1, False, 0, 1, False, False, False, '', '', '', 1, 50, 0, False, 4, 1, 4, 0.09, True, 1, 0, 7, False, False, '<p style="margin-bottom:0.75em">Recommended settings: Sampling Steps: 80-100, Sampler: Euler a, Denoising strength: 0.8</p>', 128, 8, ['left', 'right', 'up', 'down'], 1, 0.05, 128, 4, 0, ['left', 'right', 'up', 'down'], False, False, None, '', '<p style="margin-bottom:0.75em">Will upscale the image to twice the dimensions; use width and height sliders to set tile size</p>', 64, 0, 1, '', 0, '', True, False) {}
Traceback (most recent call last):
File "/home/me/src/stable-diffusion-webui/modules/ui.py", line 184, in f
res = list(func(*args, **kwargs))
File "/home/me/src/stable-diffusion-webui/webui.py", line 64, in f
res = func(*args, **kwargs)
File "/home/me/src/stable-diffusion-webui/modules/img2img.py", line 126, in img2img
processed = process_images(p)
File "/home/me/src/stable-diffusion-webui/modules/processing.py", line 371, in process_images
p.init(all_prompts, all_seeds, all_subseeds)
File "/home/me/src/stable-diffusion-webui/modules/processing.py", line 607, in init
self.sampler = sd_samplers.create_sampler_with_index(sd_samplers.samplers_for_img2img, self.sampler_index, self.sd_model)
File "/home/me/src/stable-diffusion-webui/modules/sd_samplers.py", line 50, in create_sampler_with_index
sampler = config.constructor(model)
File "/home/me/src/stable-diffusion-webui/modules/sd_samplers.py", line 33, in <lambda>
SamplerData(label, lambda model, funcname=funcname: KDiffusionSampler(funcname, model), aliases, options)
File "/home/me/src/stable-diffusion-webui/modules/sd_samplers.py", line 306, in __init__
self.model_wrap = k_diffusion.external.CompVisDenoiser(sd_model, quantize=shared.opts.enable_quantization)
File "/home/me/src/stable-diffusion-webui/repositories/k-diffusion/k_diffusion/external.py", line 135, in __init__
super().__init__(model, model.alphas_cumprod, quantize=quantize)
File "/home/me/src/stable-diffusion-webui/repositories/k-diffusion/k_diffusion/external.py", line 92, in __init__
super().__init__(((1 - alphas_cumprod) / alphas_cumprod) ** 0.5, quantize)
File "/home/me/src/stable-diffusion-webui/venv/lib64/python3.10/site-packages/torch/_tensor.py", line 32, in wrapped
return f(*args, **kwargs)
File "/home/me/src/stable-diffusion-webui/venv/lib64/python3.10/site-packages/torch/_tensor.py", line 639, in __rsub__
return _C._VariableFunctions.rsub(self, other)
RuntimeError: CUDA error: unspecified launch failure
CUDA kernel errors might be asynchronously reported at some other API call,so the stacktrace below might be incorrect.
For debugging consider passing CUDA_LAUNCH_BLOCKING=1.
```
**Desktop (please complete the following information):**
- OS: Fedora 36, drivers and cuda from the Nvidia repo (also happens on Fedora 35, drivers from rpmfusion)
- Browser Chrome
- Commit revision c3c8eef9fd5a0c8b26319e32ca4a19b56204e6df
**Additional context**
Launch flags: `--listen --opt-split-attention --allow-code --deepdanbooru`
Everything else seems to be working fine, with no issues; only the DeepDanbooru step seems to break it.
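For context, the failure happens when torch rebuilds the k-diffusion denoiser after TensorFlow (loaded by DeepDanbooru) has already touched the same CUDA device in the same process. A workaround often suggested for this kind of TensorFlow/PyTorch GPU conflict is sketched below as an assumption-laden illustration; it is not the fix applied in this record, which instead bumps the pinned DeepDanbooru commit. The idea is to enable on-demand GPU memory growth for TensorFlow before the DeepDanbooru model is loaded, so it does not claim the whole card.
```python
# Hedged sketch of a generic TF 2.x mitigation, not code from this repository.
import tensorflow as tf
for gpu in tf.config.list_physical_devices('GPU'):
    try:
        # Must be called before any GPU has been initialized in this process.
        tf.config.experimental.set_memory_growth(gpu, True)
    except RuntimeError as err:
        print(err)
```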
|
launch.py
<|code_start|># this scripts installs necessary requirements and launches main program in webui.py
import subprocess
import os
import sys
import importlib.util
import shlex
import platform
dir_repos = "repositories"
python = sys.executable
git = os.environ.get('GIT', "git")
index_url = os.environ.get('INDEX_URL', "")
def extract_arg(args, name):
return [x for x in args if x != name], name in args
def run(command, desc=None, errdesc=None):
if desc is not None:
print(desc)
result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
if result.returncode != 0:
message = f"""{errdesc or 'Error running command'}.
Command: {command}
Error code: {result.returncode}
stdout: {result.stdout.decode(encoding="utf8", errors="ignore") if len(result.stdout)>0 else '<empty>'}
stderr: {result.stderr.decode(encoding="utf8", errors="ignore") if len(result.stderr)>0 else '<empty>'}
"""
raise RuntimeError(message)
return result.stdout.decode(encoding="utf8", errors="ignore")
def check_run(command):
result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
return result.returncode == 0
def is_installed(package):
try:
spec = importlib.util.find_spec(package)
except ModuleNotFoundError:
return False
return spec is not None
def repo_dir(name):
return os.path.join(dir_repos, name)
def run_python(code, desc=None, errdesc=None):
return run(f'"{python}" -c "{code}"', desc, errdesc)
def run_pip(args, desc=None):
index_url_line = f' --index-url {index_url}' if index_url != '' else ''
return run(f'"{python}" -m pip {args} --prefer-binary{index_url_line}', desc=f"Installing {desc}", errdesc=f"Couldn't install {desc}")
def check_run_python(code):
return check_run(f'"{python}" -c "{code}"')
def git_clone(url, dir, name, commithash=None):
# TODO clone into temporary dir and move if successful
if os.path.exists(dir):
if commithash is None:
return
current_hash = run(f'"{git}" -C {dir} rev-parse HEAD', None, f"Couldn't determine {name}'s hash: {commithash}").strip()
if current_hash == commithash:
return
run(f'"{git}" -C {dir} fetch', f"Fetching updates for {name}...", f"Couldn't fetch {name}")
run(f'"{git}" -C {dir} checkout {commithash}', f"Checking out commit for {name} with hash: {commithash}...", f"Couldn't checkout commit {commithash} for {name}")
return
run(f'"{git}" clone "{url}" "{dir}"', f"Cloning {name} into {dir}...", f"Couldn't clone {name}")
if commithash is not None:
run(f'"{git}" -C {dir} checkout {commithash}', None, "Couldn't checkout {name}'s hash: {commithash}")
def version_check(commit):
try:
import requests
commits = requests.get('https://api.github.com/repos/AUTOMATIC1111/stable-diffusion-webui/branches/master').json()
if commit != "<none>" and commits['commit']['sha'] != commit:
print("--------------------------------------------------------")
print("| You are not up to date with the most recent release. |")
print("| Consider running `git pull` to update. |")
print("--------------------------------------------------------")
elif commits['commit']['sha'] == commit:
print("You are up to date with the most recent release.")
else:
print("Not a git clone, can't perform version check.")
except Exception as e:
print("versipm check failed",e)
def prepare_enviroment():
torch_command = os.environ.get('TORCH_COMMAND', "pip install torch==1.12.1+cu113 torchvision==0.13.1+cu113 --extra-index-url https://download.pytorch.org/whl/cu113")
requirements_file = os.environ.get('REQS_FILE', "requirements_versions.txt")
commandline_args = os.environ.get('COMMANDLINE_ARGS', "")
gfpgan_package = os.environ.get('GFPGAN_PACKAGE', "git+https://github.com/TencentARC/GFPGAN.git@8d2447a2d918f8eba5a4a01463fd48e45126a379")
clip_package = os.environ.get('CLIP_PACKAGE', "git+https://github.com/openai/CLIP.git@d50d76daa670286dd6cacf3bcd80b5e4823fc8e1")
deepdanbooru_package = os.environ.get('DEEPDANBOORU_PACKAGE', "git+https://github.com/KichangKim/DeepDanbooru.git@edf73df4cdaeea2cf00e9ac08bd8a9026b7a7b26")
xformers_windows_package = os.environ.get('XFORMERS_WINDOWS_PACKAGE', 'https://github.com/C43H66N12O12S2/stable-diffusion-webui/releases/download/f/xformers-0.0.14.dev0-cp310-cp310-win_amd64.whl')
stable_diffusion_repo = os.environ.get('STABLE_DIFFUSION_REPO', "https://github.com/CompVis/stable-diffusion.git")
taming_transformers_repo = os.environ.get('TAMING_REANSFORMERS_REPO', "https://github.com/CompVis/taming-transformers.git")
k_diffusion_repo = os.environ.get('K_DIFFUSION_REPO', 'https://github.com/crowsonkb/k-diffusion.git')
codeformer_repo = os.environ.get('CODEFORMET_REPO', 'https://github.com/sczhou/CodeFormer.git')
blip_repo = os.environ.get('BLIP_REPO', 'https://github.com/salesforce/BLIP.git')
stable_diffusion_commit_hash = os.environ.get('STABLE_DIFFUSION_COMMIT_HASH', "69ae4b35e0a0f6ee1af8bb9a5d0016ccb27e36dc")
taming_transformers_commit_hash = os.environ.get('TAMING_TRANSFORMERS_COMMIT_HASH', "24268930bf1dce879235a7fddd0b2355b84d7ea6")
k_diffusion_commit_hash = os.environ.get('K_DIFFUSION_COMMIT_HASH', "f4e99857772fc3a126ba886aadf795a332774878")
codeformer_commit_hash = os.environ.get('CODEFORMER_COMMIT_HASH', "c5b4593074ba6214284d6acd5f1719b6c5d739af")
blip_commit_hash = os.environ.get('BLIP_COMMIT_HASH', "48211a1594f1321b00f14c9f7a5b4813144b2fb9")
sys.argv += shlex.split(commandline_args)
sys.argv, skip_torch_cuda_test = extract_arg(sys.argv, '--skip-torch-cuda-test')
sys.argv, reinstall_xformers = extract_arg(sys.argv, '--reinstall-xformers')
sys.argv, update_check = extract_arg(sys.argv, '--update-check')
xformers = '--xformers' in sys.argv
deepdanbooru = '--deepdanbooru' in sys.argv
ngrok = '--ngrok' in sys.argv
try:
commit = run(f"{git} rev-parse HEAD").strip()
except Exception:
commit = "<none>"
print(f"Python {sys.version}")
print(f"Commit hash: {commit}")
if not is_installed("torch") or not is_installed("torchvision"):
run(f'"{python}" -m {torch_command}', "Installing torch and torchvision", "Couldn't install torch")
if not skip_torch_cuda_test:
run_python("import torch; assert torch.cuda.is_available(), 'Torch is not able to use GPU; add --skip-torch-cuda-test to COMMANDLINE_ARGS variable to disable this check'")
if not is_installed("gfpgan"):
run_pip(f"install {gfpgan_package}", "gfpgan")
if not is_installed("clip"):
run_pip(f"install {clip_package}", "clip")
if (not is_installed("xformers") or reinstall_xformers) and xformers:
if platform.system() == "Windows":
if platform.python_version().startswith("3.10"):
run_pip(f"install -U -I --no-deps {xformers_windows_package}", "xformers")
else:
print("Installation of xformers is not supported in this version of Python.")
print("You can also check this and build manually: https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Xformers#building-xformers-on-windows-by-duckness")
if not is_installed("xformers"):
exit(0)
elif platform.system() == "Linux":
run_pip("install xformers", "xformers")
if not is_installed("deepdanbooru") and deepdanbooru:
run_pip(f"install {deepdanbooru_package}#egg=deepdanbooru[tensorflow] tensorflow==2.10.0 tensorflow-io==0.27.0", "deepdanbooru")
if not is_installed("pyngrok") and ngrok:
run_pip("install pyngrok", "ngrok")
os.makedirs(dir_repos, exist_ok=True)
git_clone(stable_diffusion_repo, repo_dir('stable-diffusion'), "Stable Diffusion", stable_diffusion_commit_hash)
git_clone(taming_transformers_repo, repo_dir('taming-transformers'), "Taming Transformers", taming_transformers_commit_hash)
git_clone(k_diffusion_repo, repo_dir('k-diffusion'), "K-diffusion", k_diffusion_commit_hash)
git_clone(codeformer_repo, repo_dir('CodeFormer'), "CodeFormer", codeformer_commit_hash)
git_clone(blip_repo, repo_dir('BLIP'), "BLIP", blip_commit_hash)
if not is_installed("lpips"):
run_pip(f"install -r {os.path.join(repo_dir('CodeFormer'), 'requirements.txt')}", "requirements for CodeFormer")
run_pip(f"install -r {requirements_file}", "requirements for Web UI")
if update_check:
version_check(commit)
if "--exit" in sys.argv:
print("Exiting because of --exit argument")
exit(0)
def start_webui():
print(f"Launching Web UI with arguments: {' '.join(sys.argv[1:])}")
import webui
webui.webui()
if __name__ == "__main__":
prepare_enviroment()
start_webui()
<|code_end|>
|
launch.py
<|code_start|># this scripts installs necessary requirements and launches main program in webui.py
import subprocess
import os
import sys
import importlib.util
import shlex
import platform
dir_repos = "repositories"
python = sys.executable
git = os.environ.get('GIT', "git")
index_url = os.environ.get('INDEX_URL', "")
def extract_arg(args, name):
return [x for x in args if x != name], name in args
def run(command, desc=None, errdesc=None):
if desc is not None:
print(desc)
result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
if result.returncode != 0:
message = f"""{errdesc or 'Error running command'}.
Command: {command}
Error code: {result.returncode}
stdout: {result.stdout.decode(encoding="utf8", errors="ignore") if len(result.stdout)>0 else '<empty>'}
stderr: {result.stderr.decode(encoding="utf8", errors="ignore") if len(result.stderr)>0 else '<empty>'}
"""
raise RuntimeError(message)
return result.stdout.decode(encoding="utf8", errors="ignore")
def check_run(command):
result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
return result.returncode == 0
def is_installed(package):
try:
spec = importlib.util.find_spec(package)
except ModuleNotFoundError:
return False
return spec is not None
def repo_dir(name):
return os.path.join(dir_repos, name)
def run_python(code, desc=None, errdesc=None):
return run(f'"{python}" -c "{code}"', desc, errdesc)
def run_pip(args, desc=None):
index_url_line = f' --index-url {index_url}' if index_url != '' else ''
return run(f'"{python}" -m pip {args} --prefer-binary{index_url_line}', desc=f"Installing {desc}", errdesc=f"Couldn't install {desc}")
def check_run_python(code):
return check_run(f'"{python}" -c "{code}"')
def git_clone(url, dir, name, commithash=None):
# TODO clone into temporary dir and move if successful
if os.path.exists(dir):
if commithash is None:
return
current_hash = run(f'"{git}" -C {dir} rev-parse HEAD', None, f"Couldn't determine {name}'s hash: {commithash}").strip()
if current_hash == commithash:
return
run(f'"{git}" -C {dir} fetch', f"Fetching updates for {name}...", f"Couldn't fetch {name}")
run(f'"{git}" -C {dir} checkout {commithash}', f"Checking out commit for {name} with hash: {commithash}...", f"Couldn't checkout commit {commithash} for {name}")
return
run(f'"{git}" clone "{url}" "{dir}"', f"Cloning {name} into {dir}...", f"Couldn't clone {name}")
if commithash is not None:
run(f'"{git}" -C {dir} checkout {commithash}', None, "Couldn't checkout {name}'s hash: {commithash}")
def version_check(commit):
try:
import requests
commits = requests.get('https://api.github.com/repos/AUTOMATIC1111/stable-diffusion-webui/branches/master').json()
if commit != "<none>" and commits['commit']['sha'] != commit:
print("--------------------------------------------------------")
print("| You are not up to date with the most recent release. |")
print("| Consider running `git pull` to update. |")
print("--------------------------------------------------------")
elif commits['commit']['sha'] == commit:
print("You are up to date with the most recent release.")
else:
print("Not a git clone, can't perform version check.")
except Exception as e:
print("versipm check failed",e)
def prepare_enviroment():
torch_command = os.environ.get('TORCH_COMMAND', "pip install torch==1.12.1+cu113 torchvision==0.13.1+cu113 --extra-index-url https://download.pytorch.org/whl/cu113")
requirements_file = os.environ.get('REQS_FILE', "requirements_versions.txt")
commandline_args = os.environ.get('COMMANDLINE_ARGS', "")
gfpgan_package = os.environ.get('GFPGAN_PACKAGE', "git+https://github.com/TencentARC/GFPGAN.git@8d2447a2d918f8eba5a4a01463fd48e45126a379")
clip_package = os.environ.get('CLIP_PACKAGE', "git+https://github.com/openai/CLIP.git@d50d76daa670286dd6cacf3bcd80b5e4823fc8e1")
deepdanbooru_package = os.environ.get('DEEPDANBOORU_PACKAGE', "git+https://github.com/KichangKim/DeepDanbooru.git@d91a2963bf87c6a770d74894667e9ffa9f6de7ff")
xformers_windows_package = os.environ.get('XFORMERS_WINDOWS_PACKAGE', 'https://github.com/C43H66N12O12S2/stable-diffusion-webui/releases/download/f/xformers-0.0.14.dev0-cp310-cp310-win_amd64.whl')
stable_diffusion_repo = os.environ.get('STABLE_DIFFUSION_REPO', "https://github.com/CompVis/stable-diffusion.git")
taming_transformers_repo = os.environ.get('TAMING_REANSFORMERS_REPO', "https://github.com/CompVis/taming-transformers.git")
k_diffusion_repo = os.environ.get('K_DIFFUSION_REPO', 'https://github.com/crowsonkb/k-diffusion.git')
codeformer_repo = os.environ.get('CODEFORMET_REPO', 'https://github.com/sczhou/CodeFormer.git')
blip_repo = os.environ.get('BLIP_REPO', 'https://github.com/salesforce/BLIP.git')
stable_diffusion_commit_hash = os.environ.get('STABLE_DIFFUSION_COMMIT_HASH', "69ae4b35e0a0f6ee1af8bb9a5d0016ccb27e36dc")
taming_transformers_commit_hash = os.environ.get('TAMING_TRANSFORMERS_COMMIT_HASH', "24268930bf1dce879235a7fddd0b2355b84d7ea6")
k_diffusion_commit_hash = os.environ.get('K_DIFFUSION_COMMIT_HASH', "f4e99857772fc3a126ba886aadf795a332774878")
codeformer_commit_hash = os.environ.get('CODEFORMER_COMMIT_HASH', "c5b4593074ba6214284d6acd5f1719b6c5d739af")
blip_commit_hash = os.environ.get('BLIP_COMMIT_HASH', "48211a1594f1321b00f14c9f7a5b4813144b2fb9")
sys.argv += shlex.split(commandline_args)
sys.argv, skip_torch_cuda_test = extract_arg(sys.argv, '--skip-torch-cuda-test')
sys.argv, reinstall_xformers = extract_arg(sys.argv, '--reinstall-xformers')
sys.argv, update_check = extract_arg(sys.argv, '--update-check')
xformers = '--xformers' in sys.argv
deepdanbooru = '--deepdanbooru' in sys.argv
ngrok = '--ngrok' in sys.argv
try:
commit = run(f"{git} rev-parse HEAD").strip()
except Exception:
commit = "<none>"
print(f"Python {sys.version}")
print(f"Commit hash: {commit}")
if not is_installed("torch") or not is_installed("torchvision"):
run(f'"{python}" -m {torch_command}', "Installing torch and torchvision", "Couldn't install torch")
if not skip_torch_cuda_test:
run_python("import torch; assert torch.cuda.is_available(), 'Torch is not able to use GPU; add --skip-torch-cuda-test to COMMANDLINE_ARGS variable to disable this check'")
if not is_installed("gfpgan"):
run_pip(f"install {gfpgan_package}", "gfpgan")
if not is_installed("clip"):
run_pip(f"install {clip_package}", "clip")
if (not is_installed("xformers") or reinstall_xformers) and xformers:
if platform.system() == "Windows":
if platform.python_version().startswith("3.10"):
run_pip(f"install -U -I --no-deps {xformers_windows_package}", "xformers")
else:
print("Installation of xformers is not supported in this version of Python.")
print("You can also check this and build manually: https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Xformers#building-xformers-on-windows-by-duckness")
if not is_installed("xformers"):
exit(0)
elif platform.system() == "Linux":
run_pip("install xformers", "xformers")
if not is_installed("deepdanbooru") and deepdanbooru:
run_pip(f"install {deepdanbooru_package}#egg=deepdanbooru[tensorflow] tensorflow==2.10.0 tensorflow-io==0.27.0", "deepdanbooru")
if not is_installed("pyngrok") and ngrok:
run_pip("install pyngrok", "ngrok")
os.makedirs(dir_repos, exist_ok=True)
git_clone(stable_diffusion_repo, repo_dir('stable-diffusion'), "Stable Diffusion", stable_diffusion_commit_hash)
git_clone(taming_transformers_repo, repo_dir('taming-transformers'), "Taming Transformers", taming_transformers_commit_hash)
git_clone(k_diffusion_repo, repo_dir('k-diffusion'), "K-diffusion", k_diffusion_commit_hash)
git_clone(codeformer_repo, repo_dir('CodeFormer'), "CodeFormer", codeformer_commit_hash)
git_clone(blip_repo, repo_dir('BLIP'), "BLIP", blip_commit_hash)
if not is_installed("lpips"):
run_pip(f"install -r {os.path.join(repo_dir('CodeFormer'), 'requirements.txt')}", "requirements for CodeFormer")
run_pip(f"install -r {requirements_file}", "requirements for Web UI")
if update_check:
version_check(commit)
if "--exit" in sys.argv:
print("Exiting because of --exit argument")
exit(0)
def start_webui():
print(f"Launching Web UI with arguments: {' '.join(sys.argv[1:])}")
import webui
webui.webui()
if __name__ == "__main__":
prepare_enviroment()
start_webui()
<|code_end|>
|
Portrait mode images generate in landscape mode in img2img [Bug]:
### Is there an existing issue for this?
- [X] I have searched the existing issues and checked the recent builds/commits
### What happened?
An image in portrait mode shows up fine in the preview, but the generated alternative image is rotated to landscape mode.
### Steps to reproduce the problem
1. Load an image that was taken using a phone in portrait mode.
2. Set a prompt and press generate.
### What should have happened?
It should have generated the output image in portrait mode as well.
### Commit where the problem happens
6bd6154a92eb05c80d66df661a38f8b70cc13729
### What platforms do you use to access UI ?
Windows
### What browsers do you use to access the UI ?
Microsoft Edge
### Command Line Arguments
```Shell
--xformers
```
### Additional information, context and logs
When images are taken in portrait mode, they are often stored in landscape orientation together with EXIF orientation information indicating that they are portrait, so that image viewers can display them correctly. This orientation information should be used to determine how the output image is generated.
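For illustration, a minimal sketch (not the repository's code) of honoring the EXIF orientation with Pillow before further processing; the file name is a placeholder:
```python
from PIL import Image, ImageOps

# Placeholder path; any phone photo with an EXIF Orientation tag will do.
img = Image.open("portrait_photo.jpg")
print(img.size)   # e.g. (4032, 3024) -- pixels stored as landscape

# Rotate/flip the pixel data according to the EXIF Orientation tag,
# so downstream code sees the image the way viewers display it.
img = ImageOps.exif_transpose(img)
print(img.size)   # e.g. (3024, 4032) -- now actually portrait
```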
|
modules/img2img.py
<|code_start|>import math
import os
import sys
import traceback
import numpy as np
from PIL import Image, ImageOps, ImageChops
from modules import devices
from modules.processing import Processed, StableDiffusionProcessingImg2Img, process_images
from modules.shared import opts, state
import modules.shared as shared
import modules.processing as processing
from modules.ui import plaintext_to_html
import modules.images as images
import modules.scripts
def process_batch(p, input_dir, output_dir, args):
processing.fix_seed(p)
images = [file for file in [os.path.join(input_dir, x) for x in os.listdir(input_dir)] if os.path.isfile(file)]
print(f"Will process {len(images)} images, creating {p.n_iter * p.batch_size} new images for each.")
save_normally = output_dir == ''
p.do_not_save_grid = True
p.do_not_save_samples = not save_normally
state.job_count = len(images) * p.n_iter
for i, image in enumerate(images):
state.job = f"{i+1} out of {len(images)}"
if state.skipped:
state.skipped = False
if state.interrupted:
break
img = Image.open(image)
p.init_images = [img] * p.batch_size
proc = modules.scripts.scripts_img2img.run(p, *args)
if proc is None:
proc = process_images(p)
for n, processed_image in enumerate(proc.images):
filename = os.path.basename(image)
if n > 0:
left, right = os.path.splitext(filename)
filename = f"{left}-{n}{right}"
if not save_normally:
processed_image.save(os.path.join(output_dir, filename))
def img2img(mode: int, prompt: str, negative_prompt: str, prompt_style: str, prompt_style2: str, init_img, init_img_with_mask, init_img_inpaint, init_mask_inpaint, mask_mode, steps: int, sampler_index: int, mask_blur: int, inpainting_fill: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, *args):
is_inpaint = mode == 1
is_batch = mode == 2
if is_inpaint:
if mask_mode == 0:
image = init_img_with_mask['image']
mask = init_img_with_mask['mask']
alpha_mask = ImageOps.invert(image.split()[-1]).convert('L').point(lambda x: 255 if x > 0 else 0, mode='1')
mask = ImageChops.lighter(alpha_mask, mask.convert('L')).convert('L')
image = image.convert('RGB')
else:
image = init_img_inpaint
mask = init_mask_inpaint
else:
image = init_img
mask = None
assert 0. <= denoising_strength <= 1., 'can only work with strength in [0.0, 1.0]'
p = StableDiffusionProcessingImg2Img(
sd_model=shared.sd_model,
outpath_samples=opts.outdir_samples or opts.outdir_img2img_samples,
outpath_grids=opts.outdir_grids or opts.outdir_img2img_grids,
prompt=prompt,
negative_prompt=negative_prompt,
styles=[prompt_style, prompt_style2],
seed=seed,
subseed=subseed,
subseed_strength=subseed_strength,
seed_resize_from_h=seed_resize_from_h,
seed_resize_from_w=seed_resize_from_w,
seed_enable_extras=seed_enable_extras,
sampler_index=sampler_index,
batch_size=batch_size,
n_iter=n_iter,
steps=steps,
cfg_scale=cfg_scale,
width=width,
height=height,
restore_faces=restore_faces,
tiling=tiling,
init_images=[image],
mask=mask,
mask_blur=mask_blur,
inpainting_fill=inpainting_fill,
resize_mode=resize_mode,
denoising_strength=denoising_strength,
inpaint_full_res=inpaint_full_res,
inpaint_full_res_padding=inpaint_full_res_padding,
inpainting_mask_invert=inpainting_mask_invert,
)
p.scripts = modules.scripts.scripts_txt2img
p.script_args = args
if shared.cmd_opts.enable_console_prompts:
print(f"\nimg2img: {prompt}", file=shared.progress_print_out)
p.extra_generation_params["Mask blur"] = mask_blur
if is_batch:
assert not shared.cmd_opts.hide_ui_dir_config, "Launched with --hide-ui-dir-config, batch img2img disabled"
process_batch(p, img2img_batch_input_dir, img2img_batch_output_dir, args)
processed = Processed(p, [], p.seed, "")
else:
processed = modules.scripts.scripts_img2img.run(p, *args)
if processed is None:
processed = process_images(p)
shared.total_tqdm.clear()
generation_info_js = processed.js()
if opts.samples_log_stdout:
print(generation_info_js)
if opts.do_not_show_images:
processed.images = []
return processed.images, generation_info_js, plaintext_to_html(processed.info)
<|code_end|>
|
modules/img2img.py
<|code_start|>import math
import os
import sys
import traceback
import numpy as np
from PIL import Image, ImageOps, ImageChops
from modules import devices
from modules.processing import Processed, StableDiffusionProcessingImg2Img, process_images
from modules.shared import opts, state
import modules.shared as shared
import modules.processing as processing
from modules.ui import plaintext_to_html
import modules.images as images
import modules.scripts
def process_batch(p, input_dir, output_dir, args):
processing.fix_seed(p)
images = [file for file in [os.path.join(input_dir, x) for x in os.listdir(input_dir)] if os.path.isfile(file)]
print(f"Will process {len(images)} images, creating {p.n_iter * p.batch_size} new images for each.")
save_normally = output_dir == ''
p.do_not_save_grid = True
p.do_not_save_samples = not save_normally
state.job_count = len(images) * p.n_iter
for i, image in enumerate(images):
state.job = f"{i+1} out of {len(images)}"
if state.skipped:
state.skipped = False
if state.interrupted:
break
img = Image.open(image)
# Use the EXIF orientation of photos taken by smartphones.
img = ImageOps.exif_transpose(img)
p.init_images = [img] * p.batch_size
proc = modules.scripts.scripts_img2img.run(p, *args)
if proc is None:
proc = process_images(p)
for n, processed_image in enumerate(proc.images):
filename = os.path.basename(image)
if n > 0:
left, right = os.path.splitext(filename)
filename = f"{left}-{n}{right}"
if not save_normally:
processed_image.save(os.path.join(output_dir, filename))
def img2img(mode: int, prompt: str, negative_prompt: str, prompt_style: str, prompt_style2: str, init_img, init_img_with_mask, init_img_inpaint, init_mask_inpaint, mask_mode, steps: int, sampler_index: int, mask_blur: int, inpainting_fill: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, *args):
is_inpaint = mode == 1
is_batch = mode == 2
if is_inpaint:
# Drawn mask
if mask_mode == 0:
image = init_img_with_mask['image']
mask = init_img_with_mask['mask']
alpha_mask = ImageOps.invert(image.split()[-1]).convert('L').point(lambda x: 255 if x > 0 else 0, mode='1')
mask = ImageChops.lighter(alpha_mask, mask.convert('L')).convert('L')
image = image.convert('RGB')
# Uploaded mask
else:
image = init_img_inpaint
mask = init_mask_inpaint
# No mask
else:
image = init_img
mask = None
# Use the EXIF orientation of photos taken by smartphones.
image = ImageOps.exif_transpose(image)
assert 0. <= denoising_strength <= 1., 'can only work with strength in [0.0, 1.0]'
p = StableDiffusionProcessingImg2Img(
sd_model=shared.sd_model,
outpath_samples=opts.outdir_samples or opts.outdir_img2img_samples,
outpath_grids=opts.outdir_grids or opts.outdir_img2img_grids,
prompt=prompt,
negative_prompt=negative_prompt,
styles=[prompt_style, prompt_style2],
seed=seed,
subseed=subseed,
subseed_strength=subseed_strength,
seed_resize_from_h=seed_resize_from_h,
seed_resize_from_w=seed_resize_from_w,
seed_enable_extras=seed_enable_extras,
sampler_index=sampler_index,
batch_size=batch_size,
n_iter=n_iter,
steps=steps,
cfg_scale=cfg_scale,
width=width,
height=height,
restore_faces=restore_faces,
tiling=tiling,
init_images=[image],
mask=mask,
mask_blur=mask_blur,
inpainting_fill=inpainting_fill,
resize_mode=resize_mode,
denoising_strength=denoising_strength,
inpaint_full_res=inpaint_full_res,
inpaint_full_res_padding=inpaint_full_res_padding,
inpainting_mask_invert=inpainting_mask_invert,
)
p.scripts = modules.scripts.scripts_txt2img
p.script_args = args
if shared.cmd_opts.enable_console_prompts:
print(f"\nimg2img: {prompt}", file=shared.progress_print_out)
p.extra_generation_params["Mask blur"] = mask_blur
if is_batch:
assert not shared.cmd_opts.hide_ui_dir_config, "Launched with --hide-ui-dir-config, batch img2img disabled"
process_batch(p, img2img_batch_input_dir, img2img_batch_output_dir, args)
processed = Processed(p, [], p.seed, "")
else:
processed = modules.scripts.scripts_img2img.run(p, *args)
if processed is None:
processed = process_images(p)
shared.total_tqdm.clear()
generation_info_js = processed.js()
if opts.samples_log_stdout:
print(generation_info_js)
if opts.do_not_show_images:
processed.images = []
return processed.images, generation_info_js, plaintext_to_html(processed.info)
<|code_end|>
|
Long txt2img prompts cause a crash due to file name length
**Describe the bug**
```
Arguments: ('ornate intricate filigree framed, elf wearing ornate intricate detailed carved stained glass (((armor))), determined face, ((perfect face)), heavy makeup, led runes, inky swirling mist, gemstones, ((magic mist background)), ((eyeshadow)), (angry), detailed, intricate,(Alphonse Mucha), (Charlie Bowater), (Daniel Ridgway Knight), (Albert Lynch), (Richard S. Johnson)\nNegative prompt: ugly, fat, obese, chubby, (((deformed))), [blurry], bad anatomy, disfigured, poorly drawn face, mutation, mutated, (extra_limb), (ugly), (poorly drawn hands), messy drawing, large_breasts, penis, nose, eyes, lips, eyelashes, text, red_eyes', '', 'None', 'None', 20, 0, False, False, 1, 1, 7, -1.0, -1.0, 0, 0, 0, 512, 384, 0, False, 1, '', 4, '', True, False, None, '') {}
Traceback (most recent call last):
File "/home/gaz/src/ai/stable-diffusion-webui/modules/ui.py", line 134, in f
res = list(func(*args, **kwargs))
File "/home/gaz/src/ai/stable-diffusion-webui/webui.py", line 55, in f
res = func(*args, **kwargs)
File "/home/gaz/src/ai/stable-diffusion-webui/modules/txt2img.py", line 39, in txt2img
processed = process_images(p)
File "/home/gaz/src/ai/stable-diffusion-webui/modules/processing.py", line 383, in process_images
images.save_image(image, p.outpath_samples, "", seeds[i], prompts[i], opts.samples_format, info=infotext(n, i), p=p)
File "/home/gaz/src/ai/stable-diffusion-webui/modules/images.py", line 360, in save_image
image.save(fullfn, quality=opts.jpeg_quality, pnginfo=pnginfo)
File "/home/gaz/src/ai/stable-diffusion-webui/venv/lib/python3.10/site-packages/PIL/Image.py", line 2317, in save
fp = builtins.open(filename, "w+b")
OSError: [Errno 36] File name too long: 'outputs/txt2img-images/00002-1278808392-ornate intricate filigree framed, elf wearing ornate intricate detailed carved stained glass (((armor))), determined face, ((per.png'
```
**To Reproduce**
I accidentally pasted this whole thing in, including the text "negative prompt" and all the stuff after it:
> ornate intricate filigree framed, elf wearing ornate intricate detailed carved stained glass (((armor))), determined face, ((perfect face)), heavy makeup, led runes, inky swirling mist, gemstones, ((magic mist background)), ((eyeshadow)), (angry), detailed, intricate,(Alphonse Mucha), (Charlie Bowater), (Daniel Ridgway Knight), (Albert Lynch), (Richard S. Johnson)
Negative prompt: ugly, fat, obese, chubby, (((deformed))), [blurry], bad anatomy, disfigured, poorly drawn face, mutation, mutated, (extra_limb), (ugly), (poorly drawn hands), messy drawing, large_breasts, penis, nose, eyes, lips, eyelashes, text, red_eyes
It crashed with the stack trace above.
**Expected behavior**
https://discord.com/channels/1002292111942635562/1010578380300763146/1020288439880519762
**Desktop (please complete the following information):**
- OS: Linux
- Browser [e.g. chrome, safari]
- Commit revision: 8a32a71ca3223cf7b0911fe55db2c6dece2bacca
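For illustration, a minimal sketch of one possible mitigation: truncating the file-name part (not the directory) to the filesystem's maximum name length before saving. `os.statvfs` is POSIX-only, and the 255-byte fallback and the helper name are assumptions, not the repository's code:
```python
import os

def truncate_filename(full_path, encoding="utf-8"):
    """Trim the file name so the whole name fits within the filesystem limit."""
    dir_name, full_name = os.path.split(full_path)
    name, ext = os.path.splitext(full_name)
    try:
        max_len = os.statvfs(dir_name or ".").f_namemax  # POSIX only
    except (AttributeError, OSError):
        max_len = 255  # assumed common limit (e.g. ext4, NTFS)
    name = name.encode(encoding)[:max_len - len(ext)].decode(encoding, "ignore")
    return os.path.join(dir_name, name + ext)
```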
|
modules/images.py
<|code_start|>import datetime
import sys
import traceback
import pytz
import io
import math
import os
from collections import namedtuple
import re
import numpy as np
import piexif
import piexif.helper
from PIL import Image, ImageFont, ImageDraw, PngImagePlugin
from fonts.ttf import Roboto
import string
from modules import sd_samplers, shared, script_callbacks
from modules.shared import opts, cmd_opts
LANCZOS = (Image.Resampling.LANCZOS if hasattr(Image, 'Resampling') else Image.LANCZOS)
def image_grid(imgs, batch_size=1, rows=None):
if rows is None:
if opts.n_rows > 0:
rows = opts.n_rows
elif opts.n_rows == 0:
rows = batch_size
elif opts.grid_prevent_empty_spots:
rows = math.floor(math.sqrt(len(imgs)))
while len(imgs) % rows != 0:
rows -= 1
else:
rows = math.sqrt(len(imgs))
rows = round(rows)
cols = math.ceil(len(imgs) / rows)
w, h = imgs[0].size
grid = Image.new('RGB', size=(cols * w, rows * h), color='black')
for i, img in enumerate(imgs):
grid.paste(img, box=(i % cols * w, i // cols * h))
return grid
Grid = namedtuple("Grid", ["tiles", "tile_w", "tile_h", "image_w", "image_h", "overlap"])
def split_grid(image, tile_w=512, tile_h=512, overlap=64):
w = image.width
h = image.height
non_overlap_width = tile_w - overlap
non_overlap_height = tile_h - overlap
cols = math.ceil((w - overlap) / non_overlap_width)
rows = math.ceil((h - overlap) / non_overlap_height)
dx = (w - tile_w) / (cols - 1) if cols > 1 else 0
dy = (h - tile_h) / (rows - 1) if rows > 1 else 0
grid = Grid([], tile_w, tile_h, w, h, overlap)
for row in range(rows):
row_images = []
y = int(row * dy)
if y + tile_h >= h:
y = h - tile_h
for col in range(cols):
x = int(col * dx)
if x + tile_w >= w:
x = w - tile_w
tile = image.crop((x, y, x + tile_w, y + tile_h))
row_images.append([x, tile_w, tile])
grid.tiles.append([y, tile_h, row_images])
return grid
def combine_grid(grid):
def make_mask_image(r):
r = r * 255 / grid.overlap
r = r.astype(np.uint8)
return Image.fromarray(r, 'L')
mask_w = make_mask_image(np.arange(grid.overlap, dtype=np.float32).reshape((1, grid.overlap)).repeat(grid.tile_h, axis=0))
mask_h = make_mask_image(np.arange(grid.overlap, dtype=np.float32).reshape((grid.overlap, 1)).repeat(grid.image_w, axis=1))
combined_image = Image.new("RGB", (grid.image_w, grid.image_h))
for y, h, row in grid.tiles:
combined_row = Image.new("RGB", (grid.image_w, h))
for x, w, tile in row:
if x == 0:
combined_row.paste(tile, (0, 0))
continue
combined_row.paste(tile.crop((0, 0, grid.overlap, h)), (x, 0), mask=mask_w)
combined_row.paste(tile.crop((grid.overlap, 0, w, h)), (x + grid.overlap, 0))
if y == 0:
combined_image.paste(combined_row, (0, 0))
continue
combined_image.paste(combined_row.crop((0, 0, combined_row.width, grid.overlap)), (0, y), mask=mask_h)
combined_image.paste(combined_row.crop((0, grid.overlap, combined_row.width, h)), (0, y + grid.overlap))
return combined_image
class GridAnnotation:
def __init__(self, text='', is_active=True):
self.text = text
self.is_active = is_active
self.size = None
def draw_grid_annotations(im, width, height, hor_texts, ver_texts):
def wrap(drawing, text, font, line_length):
lines = ['']
for word in text.split():
line = f'{lines[-1]} {word}'.strip()
if drawing.textlength(line, font=font) <= line_length:
lines[-1] = line
else:
lines.append(word)
return lines
def draw_texts(drawing, draw_x, draw_y, lines):
for i, line in enumerate(lines):
drawing.multiline_text((draw_x, draw_y + line.size[1] / 2), line.text, font=fnt, fill=color_active if line.is_active else color_inactive, anchor="mm", align="center")
if not line.is_active:
drawing.line((draw_x - line.size[0] // 2, draw_y + line.size[1] // 2, draw_x + line.size[0] // 2, draw_y + line.size[1] // 2), fill=color_inactive, width=4)
draw_y += line.size[1] + line_spacing
fontsize = (width + height) // 25
line_spacing = fontsize // 2
try:
fnt = ImageFont.truetype(opts.font or Roboto, fontsize)
except Exception:
fnt = ImageFont.truetype(Roboto, fontsize)
color_active = (0, 0, 0)
color_inactive = (153, 153, 153)
pad_left = 0 if sum([sum([len(line.text) for line in lines]) for lines in ver_texts]) == 0 else width * 3 // 4
cols = im.width // width
rows = im.height // height
assert cols == len(hor_texts), f'bad number of horizontal texts: {len(hor_texts)}; must be {cols}'
assert rows == len(ver_texts), f'bad number of vertical texts: {len(ver_texts)}; must be {rows}'
calc_img = Image.new("RGB", (1, 1), "white")
calc_d = ImageDraw.Draw(calc_img)
for texts, allowed_width in zip(hor_texts + ver_texts, [width] * len(hor_texts) + [pad_left] * len(ver_texts)):
items = [] + texts
texts.clear()
for line in items:
wrapped = wrap(calc_d, line.text, fnt, allowed_width)
texts += [GridAnnotation(x, line.is_active) for x in wrapped]
for line in texts:
bbox = calc_d.multiline_textbbox((0, 0), line.text, font=fnt)
line.size = (bbox[2] - bbox[0], bbox[3] - bbox[1])
hor_text_heights = [sum([line.size[1] + line_spacing for line in lines]) - line_spacing for lines in hor_texts]
ver_text_heights = [sum([line.size[1] + line_spacing for line in lines]) - line_spacing * len(lines) for lines in
ver_texts]
pad_top = max(hor_text_heights) + line_spacing * 2
result = Image.new("RGB", (im.width + pad_left, im.height + pad_top), "white")
result.paste(im, (pad_left, pad_top))
d = ImageDraw.Draw(result)
for col in range(cols):
x = pad_left + width * col + width / 2
y = pad_top / 2 - hor_text_heights[col] / 2
draw_texts(d, x, y, hor_texts[col])
for row in range(rows):
x = pad_left / 2
y = pad_top + height * row + height / 2 - ver_text_heights[row] / 2
draw_texts(d, x, y, ver_texts[row])
return result
def draw_prompt_matrix(im, width, height, all_prompts):
prompts = all_prompts[1:]
boundary = math.ceil(len(prompts) / 2)
prompts_horiz = prompts[:boundary]
prompts_vert = prompts[boundary:]
hor_texts = [[GridAnnotation(x, is_active=pos & (1 << i) != 0) for i, x in enumerate(prompts_horiz)] for pos in range(1 << len(prompts_horiz))]
ver_texts = [[GridAnnotation(x, is_active=pos & (1 << i) != 0) for i, x in enumerate(prompts_vert)] for pos in range(1 << len(prompts_vert))]
return draw_grid_annotations(im, width, height, hor_texts, ver_texts)
def resize_image(resize_mode, im, width, height):
def resize(im, w, h):
if opts.upscaler_for_img2img is None or opts.upscaler_for_img2img == "None" or im.mode == 'L':
return im.resize((w, h), resample=LANCZOS)
scale = max(w / im.width, h / im.height)
if scale > 1.0:
upscalers = [x for x in shared.sd_upscalers if x.name == opts.upscaler_for_img2img]
assert len(upscalers) > 0, f"could not find upscaler named {opts.upscaler_for_img2img}"
upscaler = upscalers[0]
im = upscaler.scaler.upscale(im, scale, upscaler.data_path)
if im.width != w or im.height != h:
im = im.resize((w, h), resample=LANCZOS)
return im
if resize_mode == 0:
res = resize(im, width, height)
elif resize_mode == 1:
ratio = width / height
src_ratio = im.width / im.height
src_w = width if ratio > src_ratio else im.width * height // im.height
src_h = height if ratio <= src_ratio else im.height * width // im.width
resized = resize(im, src_w, src_h)
res = Image.new("RGB", (width, height))
res.paste(resized, box=(width // 2 - src_w // 2, height // 2 - src_h // 2))
else:
ratio = width / height
src_ratio = im.width / im.height
src_w = width if ratio < src_ratio else im.width * height // im.height
src_h = height if ratio >= src_ratio else im.height * width // im.width
resized = resize(im, src_w, src_h)
res = Image.new("RGB", (width, height))
res.paste(resized, box=(width // 2 - src_w // 2, height // 2 - src_h // 2))
if ratio < src_ratio:
fill_height = height // 2 - src_h // 2
res.paste(resized.resize((width, fill_height), box=(0, 0, width, 0)), box=(0, 0))
res.paste(resized.resize((width, fill_height), box=(0, resized.height, width, resized.height)), box=(0, fill_height + src_h))
elif ratio > src_ratio:
fill_width = width // 2 - src_w // 2
res.paste(resized.resize((fill_width, height), box=(0, 0, 0, height)), box=(0, 0))
res.paste(resized.resize((fill_width, height), box=(resized.width, 0, resized.width, height)), box=(fill_width + src_w, 0))
return res
invalid_filename_chars = '<>:"/\\|?*\n'
invalid_filename_prefix = ' '
invalid_filename_postfix = ' .'
re_nonletters = re.compile(r'[\s' + string.punctuation + ']+')
re_pattern = re.compile(r"(.*?)(?:\[([^\[\]]+)\]|$)")
re_pattern_arg = re.compile(r"(.*)<([^>]*)>$")
max_filename_part_length = 128
def sanitize_filename_part(text, replace_spaces=True):
if text is None:
return None
if replace_spaces:
text = text.replace(' ', '_')
text = text.translate({ord(x): '_' for x in invalid_filename_chars})
text = text.lstrip(invalid_filename_prefix)[:max_filename_part_length]
text = text.rstrip(invalid_filename_postfix)
return text
class FilenameGenerator:
replacements = {
'seed': lambda self: self.seed if self.seed is not None else '',
'steps': lambda self: self.p and self.p.steps,
'cfg': lambda self: self.p and self.p.cfg_scale,
'width': lambda self: self.p and self.p.width,
'height': lambda self: self.p and self.p.height,
'styles': lambda self: self.p and sanitize_filename_part(", ".join([style for style in self.p.styles if not style == "None"]) or "None", replace_spaces=False),
'sampler': lambda self: self.p and sanitize_filename_part(sd_samplers.samplers[self.p.sampler_index].name, replace_spaces=False),
'model_hash': lambda self: getattr(self.p, "sd_model_hash", shared.sd_model.sd_model_hash),
'date': lambda self: datetime.datetime.now().strftime('%Y-%m-%d'),
'datetime': lambda self, *args: self.datetime(*args), # accepts formats: [datetime], [datetime<Format>], [datetime<Format><Time Zone>]
'job_timestamp': lambda self: getattr(self.p, "job_timestamp", shared.state.job_timestamp),
'prompt': lambda self: sanitize_filename_part(self.prompt),
'prompt_no_styles': lambda self: self.prompt_no_style(),
'prompt_spaces': lambda self: sanitize_filename_part(self.prompt, replace_spaces=False),
'prompt_words': lambda self: self.prompt_words(),
}
default_time_format = '%Y%m%d%H%M%S'
def __init__(self, p, seed, prompt):
self.p = p
self.seed = seed
self.prompt = prompt
def prompt_no_style(self):
if self.p is None or self.prompt is None:
return None
prompt_no_style = self.prompt
for style in shared.prompt_styles.get_style_prompts(self.p.styles):
if len(style) > 0:
for part in style.split("{prompt}"):
prompt_no_style = prompt_no_style.replace(part, "").replace(", ,", ",").strip().strip(',')
prompt_no_style = prompt_no_style.replace(style, "").strip().strip(',').strip()
return sanitize_filename_part(prompt_no_style, replace_spaces=False)
def prompt_words(self):
words = [x for x in re_nonletters.split(self.prompt or "") if len(x) > 0]
if len(words) == 0:
words = ["empty"]
return sanitize_filename_part(" ".join(words[0:opts.directories_max_prompt_words]), replace_spaces=False)
def datetime(self, *args):
time_datetime = datetime.datetime.now()
time_format = args[0] if len(args) > 0 and args[0] != "" else self.default_time_format
try:
time_zone = pytz.timezone(args[1]) if len(args) > 1 else None
except pytz.exceptions.UnknownTimeZoneError as _:
time_zone = None
time_zone_time = time_datetime.astimezone(time_zone)
try:
formatted_time = time_zone_time.strftime(time_format)
except (ValueError, TypeError) as _:
formatted_time = time_zone_time.strftime(self.default_time_format)
return sanitize_filename_part(formatted_time, replace_spaces=False)
def apply(self, x):
res = ''
for m in re_pattern.finditer(x):
text, pattern = m.groups()
res += text
if pattern is None:
continue
pattern_args = []
while True:
m = re_pattern_arg.match(pattern)
if m is None:
break
pattern, arg = m.groups()
pattern_args.insert(0, arg)
fun = self.replacements.get(pattern.lower())
if fun is not None:
try:
replacement = fun(self, *pattern_args)
except Exception:
replacement = None
print(f"Error adding [{pattern}] to filename", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
if replacement is not None:
res += str(replacement)
continue
res += f'[{pattern}]'
return res
def get_next_sequence_number(path, basename):
"""
Determines and returns the next sequence number to use when saving an image in the specified directory.
The sequence starts at 0.
"""
result = -1
if basename != '':
basename = basename + "-"
prefix_length = len(basename)
for p in os.listdir(path):
if p.startswith(basename):
l = os.path.splitext(p[prefix_length:])[0].split('-') # splits the filename (removing the basename first if one is defined, so the sequence number is always the first element)
try:
result = max(int(l[0]), result)
except ValueError:
pass
return result + 1
def save_image(image, path, basename, seed=None, prompt=None, extension='png', info=None, short_filename=False, no_prompt=False, grid=False, pnginfo_section_name='parameters', p=None, existing_info=None, forced_filename=None, suffix="", save_to_dirs=None):
"""Save an image.
Args:
image (`PIL.Image`):
The image to be saved.
path (`str`):
The directory to save the image. Note, the option `save_to_dirs` will make the image to be saved into a sub directory.
basename (`str`):
The base filename which will be applied to `filename pattern`.
seed, prompt, short_filename,
extension (`str`):
Image file extension, default is `png`.
pngsectionname (`str`):
Specify the name of the section which `info` will be saved in.
info (`str` or `PngImagePlugin.iTXt`):
PNG info chunks.
existing_info (`dict`):
Additional PNG info. `existing_info == {pngsectionname: info, ...}`
no_prompt:
TODO I don't know its meaning.
p (`StableDiffusionProcessing`)
forced_filename (`str`):
If specified, `basename` and filename pattern will be ignored.
save_to_dirs (bool):
If true, the image will be saved into a subdirectory of `path`.
Returns: (fullfn, txt_fullfn)
fullfn (`str`):
The full path of the saved image.
txt_fullfn (`str` or None):
If a text file is saved for this image, this will be its full path. Otherwise None.
"""
namegen = FilenameGenerator(p, seed, prompt)
if save_to_dirs is None:
save_to_dirs = (grid and opts.grid_save_to_dirs) or (not grid and opts.save_to_dirs and not no_prompt)
if save_to_dirs:
dirname = namegen.apply(opts.directories_filename_pattern or "[prompt_words]").lstrip(' ').rstrip('\\ /')
path = os.path.join(path, dirname)
os.makedirs(path, exist_ok=True)
if forced_filename is None:
if short_filename or seed is None:
file_decoration = ""
elif opts.save_to_dirs:
file_decoration = opts.samples_filename_pattern or "[seed]"
else:
file_decoration = opts.samples_filename_pattern or "[seed]-[prompt_spaces]"
add_number = opts.save_images_add_number or file_decoration == ''
if file_decoration != "" and add_number:
file_decoration = "-" + file_decoration
file_decoration = namegen.apply(file_decoration) + suffix
if add_number:
basecount = get_next_sequence_number(path, basename)
fullfn = None
for i in range(500):
fn = f"{basecount + i:05}" if basename == '' else f"{basename}-{basecount + i:04}"
fullfn = os.path.join(path, f"{fn}{file_decoration}.{extension}")
if not os.path.exists(fullfn):
break
else:
fullfn = os.path.join(path, f"{file_decoration}.{extension}")
else:
fullfn = os.path.join(path, f"{forced_filename}.{extension}")
pnginfo = existing_info or {}
if info is not None:
pnginfo[pnginfo_section_name] = info
params = script_callbacks.ImageSaveParams(image, p, fullfn, pnginfo)
script_callbacks.before_image_saved_callback(params)
image = params.image
fullfn = params.filename
info = params.pnginfo.get(pnginfo_section_name, None)
fullfn_without_extension, extension = os.path.splitext(params.filename)
def exif_bytes():
return piexif.dump({
"Exif": {
piexif.ExifIFD.UserComment: piexif.helper.UserComment.dump(info or "", encoding="unicode")
},
})
if extension.lower() == '.png':
pnginfo_data = PngImagePlugin.PngInfo()
for k, v in params.pnginfo.items():
pnginfo_data.add_text(k, str(v))
image.save(fullfn, quality=opts.jpeg_quality, pnginfo=pnginfo_data)
elif extension.lower() in (".jpg", ".jpeg", ".webp"):
image.save(fullfn, quality=opts.jpeg_quality)
if opts.enable_pnginfo and info is not None:
piexif.insert(exif_bytes(), fullfn)
else:
image.save(fullfn, quality=opts.jpeg_quality)
target_side_length = 4000
oversize = image.width > target_side_length or image.height > target_side_length
if opts.export_for_4chan and (oversize or os.stat(fullfn).st_size > 4 * 1024 * 1024):
ratio = image.width / image.height
if oversize and ratio > 1:
image = image.resize((target_side_length, image.height * target_side_length // image.width), LANCZOS)
elif oversize:
image = image.resize((image.width * target_side_length // image.height, target_side_length), LANCZOS)
image.save(fullfn_without_extension + ".jpg", quality=opts.jpeg_quality)
if opts.enable_pnginfo and info is not None:
piexif.insert(exif_bytes(), fullfn_without_extension + ".jpg")
if opts.save_txt and info is not None:
txt_fullfn = f"{fullfn_without_extension}.txt"
with open(txt_fullfn, "w", encoding="utf8") as file:
file.write(info + "\n")
else:
txt_fullfn = None
script_callbacks.image_saved_callback(params)
return fullfn, txt_fullfn
def image_data(data):
try:
image = Image.open(io.BytesIO(data))
textinfo = image.text["parameters"]
return textinfo, None
except Exception:
pass
try:
text = data.decode('utf8')
assert len(text) < 10000
return text, None
except Exception:
pass
return '', None
<|code_end|>
|
modules/images.py
<|code_start|>import datetime
import sys
import traceback
import pytz
import io
import math
import os
from collections import namedtuple
import re
import numpy as np
import piexif
import piexif.helper
from PIL import Image, ImageFont, ImageDraw, PngImagePlugin
from fonts.ttf import Roboto
import string
from modules import sd_samplers, shared, script_callbacks
from modules.shared import opts, cmd_opts
LANCZOS = (Image.Resampling.LANCZOS if hasattr(Image, 'Resampling') else Image.LANCZOS)
def image_grid(imgs, batch_size=1, rows=None):
if rows is None:
if opts.n_rows > 0:
rows = opts.n_rows
elif opts.n_rows == 0:
rows = batch_size
elif opts.grid_prevent_empty_spots:
rows = math.floor(math.sqrt(len(imgs)))
while len(imgs) % rows != 0:
rows -= 1
else:
rows = math.sqrt(len(imgs))
rows = round(rows)
cols = math.ceil(len(imgs) / rows)
w, h = imgs[0].size
grid = Image.new('RGB', size=(cols * w, rows * h), color='black')
for i, img in enumerate(imgs):
grid.paste(img, box=(i % cols * w, i // cols * h))
return grid
Grid = namedtuple("Grid", ["tiles", "tile_w", "tile_h", "image_w", "image_h", "overlap"])
def split_grid(image, tile_w=512, tile_h=512, overlap=64):
w = image.width
h = image.height
non_overlap_width = tile_w - overlap
non_overlap_height = tile_h - overlap
cols = math.ceil((w - overlap) / non_overlap_width)
rows = math.ceil((h - overlap) / non_overlap_height)
dx = (w - tile_w) / (cols - 1) if cols > 1 else 0
dy = (h - tile_h) / (rows - 1) if rows > 1 else 0
grid = Grid([], tile_w, tile_h, w, h, overlap)
for row in range(rows):
row_images = []
y = int(row * dy)
if y + tile_h >= h:
y = h - tile_h
for col in range(cols):
x = int(col * dx)
if x + tile_w >= w:
x = w - tile_w
tile = image.crop((x, y, x + tile_w, y + tile_h))
row_images.append([x, tile_w, tile])
grid.tiles.append([y, tile_h, row_images])
return grid
def combine_grid(grid):
def make_mask_image(r):
r = r * 255 / grid.overlap
r = r.astype(np.uint8)
return Image.fromarray(r, 'L')
mask_w = make_mask_image(np.arange(grid.overlap, dtype=np.float32).reshape((1, grid.overlap)).repeat(grid.tile_h, axis=0))
mask_h = make_mask_image(np.arange(grid.overlap, dtype=np.float32).reshape((grid.overlap, 1)).repeat(grid.image_w, axis=1))
combined_image = Image.new("RGB", (grid.image_w, grid.image_h))
for y, h, row in grid.tiles:
combined_row = Image.new("RGB", (grid.image_w, h))
for x, w, tile in row:
if x == 0:
combined_row.paste(tile, (0, 0))
continue
combined_row.paste(tile.crop((0, 0, grid.overlap, h)), (x, 0), mask=mask_w)
combined_row.paste(tile.crop((grid.overlap, 0, w, h)), (x + grid.overlap, 0))
if y == 0:
combined_image.paste(combined_row, (0, 0))
continue
combined_image.paste(combined_row.crop((0, 0, combined_row.width, grid.overlap)), (0, y), mask=mask_h)
combined_image.paste(combined_row.crop((0, grid.overlap, combined_row.width, h)), (0, y + grid.overlap))
return combined_image
class GridAnnotation:
def __init__(self, text='', is_active=True):
self.text = text
self.is_active = is_active
self.size = None
def draw_grid_annotations(im, width, height, hor_texts, ver_texts):
def wrap(drawing, text, font, line_length):
lines = ['']
for word in text.split():
line = f'{lines[-1]} {word}'.strip()
if drawing.textlength(line, font=font) <= line_length:
lines[-1] = line
else:
lines.append(word)
return lines
def draw_texts(drawing, draw_x, draw_y, lines):
for i, line in enumerate(lines):
drawing.multiline_text((draw_x, draw_y + line.size[1] / 2), line.text, font=fnt, fill=color_active if line.is_active else color_inactive, anchor="mm", align="center")
if not line.is_active:
drawing.line((draw_x - line.size[0] // 2, draw_y + line.size[1] // 2, draw_x + line.size[0] // 2, draw_y + line.size[1] // 2), fill=color_inactive, width=4)
draw_y += line.size[1] + line_spacing
fontsize = (width + height) // 25
line_spacing = fontsize // 2
try:
fnt = ImageFont.truetype(opts.font or Roboto, fontsize)
except Exception:
fnt = ImageFont.truetype(Roboto, fontsize)
color_active = (0, 0, 0)
color_inactive = (153, 153, 153)
pad_left = 0 if sum([sum([len(line.text) for line in lines]) for lines in ver_texts]) == 0 else width * 3 // 4
cols = im.width // width
rows = im.height // height
assert cols == len(hor_texts), f'bad number of horizontal texts: {len(hor_texts)}; must be {cols}'
assert rows == len(ver_texts), f'bad number of vertical texts: {len(ver_texts)}; must be {rows}'
calc_img = Image.new("RGB", (1, 1), "white")
calc_d = ImageDraw.Draw(calc_img)
for texts, allowed_width in zip(hor_texts + ver_texts, [width] * len(hor_texts) + [pad_left] * len(ver_texts)):
items = [] + texts
texts.clear()
for line in items:
wrapped = wrap(calc_d, line.text, fnt, allowed_width)
texts += [GridAnnotation(x, line.is_active) for x in wrapped]
for line in texts:
bbox = calc_d.multiline_textbbox((0, 0), line.text, font=fnt)
line.size = (bbox[2] - bbox[0], bbox[3] - bbox[1])
hor_text_heights = [sum([line.size[1] + line_spacing for line in lines]) - line_spacing for lines in hor_texts]
ver_text_heights = [sum([line.size[1] + line_spacing for line in lines]) - line_spacing * len(lines) for lines in
ver_texts]
pad_top = max(hor_text_heights) + line_spacing * 2
result = Image.new("RGB", (im.width + pad_left, im.height + pad_top), "white")
result.paste(im, (pad_left, pad_top))
d = ImageDraw.Draw(result)
for col in range(cols):
x = pad_left + width * col + width / 2
y = pad_top / 2 - hor_text_heights[col] / 2
draw_texts(d, x, y, hor_texts[col])
for row in range(rows):
x = pad_left / 2
y = pad_top + height * row + height / 2 - ver_text_heights[row] / 2
draw_texts(d, x, y, ver_texts[row])
return result
def draw_prompt_matrix(im, width, height, all_prompts):
prompts = all_prompts[1:]
boundary = math.ceil(len(prompts) / 2)
prompts_horiz = prompts[:boundary]
prompts_vert = prompts[boundary:]
hor_texts = [[GridAnnotation(x, is_active=pos & (1 << i) != 0) for i, x in enumerate(prompts_horiz)] for pos in range(1 << len(prompts_horiz))]
ver_texts = [[GridAnnotation(x, is_active=pos & (1 << i) != 0) for i, x in enumerate(prompts_vert)] for pos in range(1 << len(prompts_vert))]
return draw_grid_annotations(im, width, height, hor_texts, ver_texts)
def resize_image(resize_mode, im, width, height):
def resize(im, w, h):
if opts.upscaler_for_img2img is None or opts.upscaler_for_img2img == "None" or im.mode == 'L':
return im.resize((w, h), resample=LANCZOS)
scale = max(w / im.width, h / im.height)
if scale > 1.0:
upscalers = [x for x in shared.sd_upscalers if x.name == opts.upscaler_for_img2img]
assert len(upscalers) > 0, f"could not find upscaler named {opts.upscaler_for_img2img}"
upscaler = upscalers[0]
im = upscaler.scaler.upscale(im, scale, upscaler.data_path)
if im.width != w or im.height != h:
im = im.resize((w, h), resample=LANCZOS)
return im
if resize_mode == 0:
res = resize(im, width, height)
elif resize_mode == 1:
ratio = width / height
src_ratio = im.width / im.height
src_w = width if ratio > src_ratio else im.width * height // im.height
src_h = height if ratio <= src_ratio else im.height * width // im.width
resized = resize(im, src_w, src_h)
res = Image.new("RGB", (width, height))
res.paste(resized, box=(width // 2 - src_w // 2, height // 2 - src_h // 2))
else:
ratio = width / height
src_ratio = im.width / im.height
src_w = width if ratio < src_ratio else im.width * height // im.height
src_h = height if ratio >= src_ratio else im.height * width // im.width
resized = resize(im, src_w, src_h)
res = Image.new("RGB", (width, height))
res.paste(resized, box=(width // 2 - src_w // 2, height // 2 - src_h // 2))
if ratio < src_ratio:
fill_height = height // 2 - src_h // 2
res.paste(resized.resize((width, fill_height), box=(0, 0, width, 0)), box=(0, 0))
res.paste(resized.resize((width, fill_height), box=(0, resized.height, width, resized.height)), box=(0, fill_height + src_h))
elif ratio > src_ratio:
fill_width = width // 2 - src_w // 2
res.paste(resized.resize((fill_width, height), box=(0, 0, 0, height)), box=(0, 0))
res.paste(resized.resize((fill_width, height), box=(resized.width, 0, resized.width, height)), box=(fill_width + src_w, 0))
return res
invalid_filename_chars = '<>:"/\\|?*\n'
invalid_filename_prefix = ' '
invalid_filename_postfix = ' .'
re_nonletters = re.compile(r'[\s' + string.punctuation + ']+')
re_pattern = re.compile(r"(.*?)(?:\[([^\[\]]+)\]|$)")
re_pattern_arg = re.compile(r"(.*)<([^>]*)>$")
max_filename_part_length = 128
def sanitize_filename_part(text, replace_spaces=True):
if text is None:
return None
if replace_spaces:
text = text.replace(' ', '_')
text = text.translate({ord(x): '_' for x in invalid_filename_chars})
text = text.lstrip(invalid_filename_prefix)[:max_filename_part_length]
text = text.rstrip(invalid_filename_postfix)
return text
class FilenameGenerator:
replacements = {
'seed': lambda self: self.seed if self.seed is not None else '',
'steps': lambda self: self.p and self.p.steps,
'cfg': lambda self: self.p and self.p.cfg_scale,
'width': lambda self: self.p and self.p.width,
'height': lambda self: self.p and self.p.height,
'styles': lambda self: self.p and sanitize_filename_part(", ".join([style for style in self.p.styles if not style == "None"]) or "None", replace_spaces=False),
'sampler': lambda self: self.p and sanitize_filename_part(sd_samplers.samplers[self.p.sampler_index].name, replace_spaces=False),
'model_hash': lambda self: getattr(self.p, "sd_model_hash", shared.sd_model.sd_model_hash),
'date': lambda self: datetime.datetime.now().strftime('%Y-%m-%d'),
'datetime': lambda self, *args: self.datetime(*args), # accepts formats: [datetime], [datetime<Format>], [datetime<Format><Time Zone>]
'job_timestamp': lambda self: getattr(self.p, "job_timestamp", shared.state.job_timestamp),
'prompt': lambda self: sanitize_filename_part(self.prompt),
'prompt_no_styles': lambda self: self.prompt_no_style(),
'prompt_spaces': lambda self: sanitize_filename_part(self.prompt, replace_spaces=False),
'prompt_words': lambda self: self.prompt_words(),
}
default_time_format = '%Y%m%d%H%M%S'
def __init__(self, p, seed, prompt):
self.p = p
self.seed = seed
self.prompt = prompt
def prompt_no_style(self):
if self.p is None or self.prompt is None:
return None
prompt_no_style = self.prompt
for style in shared.prompt_styles.get_style_prompts(self.p.styles):
if len(style) > 0:
for part in style.split("{prompt}"):
prompt_no_style = prompt_no_style.replace(part, "").replace(", ,", ",").strip().strip(',')
prompt_no_style = prompt_no_style.replace(style, "").strip().strip(',').strip()
return sanitize_filename_part(prompt_no_style, replace_spaces=False)
def prompt_words(self):
words = [x for x in re_nonletters.split(self.prompt or "") if len(x) > 0]
if len(words) == 0:
words = ["empty"]
return sanitize_filename_part(" ".join(words[0:opts.directories_max_prompt_words]), replace_spaces=False)
def datetime(self, *args):
time_datetime = datetime.datetime.now()
time_format = args[0] if len(args) > 0 and args[0] != "" else self.default_time_format
try:
time_zone = pytz.timezone(args[1]) if len(args) > 1 else None
except pytz.exceptions.UnknownTimeZoneError as _:
time_zone = None
time_zone_time = time_datetime.astimezone(time_zone)
try:
formatted_time = time_zone_time.strftime(time_format)
except (ValueError, TypeError) as _:
formatted_time = time_zone_time.strftime(self.default_time_format)
return sanitize_filename_part(formatted_time, replace_spaces=False)
def apply(self, x):
res = ''
for m in re_pattern.finditer(x):
text, pattern = m.groups()
res += text
if pattern is None:
continue
pattern_args = []
while True:
m = re_pattern_arg.match(pattern)
if m is None:
break
pattern, arg = m.groups()
pattern_args.insert(0, arg)
fun = self.replacements.get(pattern.lower())
if fun is not None:
try:
replacement = fun(self, *pattern_args)
except Exception:
replacement = None
print(f"Error adding [{pattern}] to filename", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
if replacement is not None:
res += str(replacement)
continue
res += f'[{pattern}]'
return res
def get_next_sequence_number(path, basename):
"""
Determines and returns the next sequence number to use when saving an image in the specified directory.
The sequence starts at 0.
"""
result = -1
if basename != '':
basename = basename + "-"
prefix_length = len(basename)
for p in os.listdir(path):
if p.startswith(basename):
l = os.path.splitext(p[prefix_length:])[0].split('-') # splits the filename (removing the basename first if one is defined, so the sequence number is always the first element)
try:
result = max(int(l[0]), result)
except ValueError:
pass
return result + 1
def truncate_fullpath(full_path, encoding='utf-8'):
dir_name, full_name = os.path.split(full_path)
file_name, file_ext = os.path.splitext(full_name)
max_length = os.statvfs(dir_name).f_namemax
file_name_truncated = file_name.encode(encoding)[:max_length - len(file_ext)].decode(encoding, 'ignore')
return os.path.join(dir_name , file_name_truncated + file_ext)
def save_image(image, path, basename, seed=None, prompt=None, extension='png', info=None, short_filename=False, no_prompt=False, grid=False, pnginfo_section_name='parameters', p=None, existing_info=None, forced_filename=None, suffix="", save_to_dirs=None):
"""Save an image.
Args:
image (`PIL.Image`):
The image to be saved.
path (`str`):
The directory to save the image. Note, the option `save_to_dirs` will make the image to be saved into a sub directory.
basename (`str`):
The base filename which will be applied to `filename pattern`.
seed, prompt, short_filename,
extension (`str`):
Image file extension, default is `png`.
pngsectionname (`str`):
Specify the name of the section which `info` will be saved in.
info (`str` or `PngImagePlugin.iTXt`):
PNG info chunks.
existing_info (`dict`):
Additional PNG info. `existing_info == {pngsectionname: info, ...}`
no_prompt:
TODO I don't know its meaning.
p (`StableDiffusionProcessing`)
forced_filename (`str`):
If specified, `basename` and filename pattern will be ignored.
save_to_dirs (bool):
If true, the image will be saved into a subdirectory of `path`.
Returns: (fullfn, txt_fullfn)
fullfn (`str`):
The full path of the saved image.
txt_fullfn (`str` or None):
If a text file is saved for this image, this will be its full path. Otherwise None.
"""
namegen = FilenameGenerator(p, seed, prompt)
if save_to_dirs is None:
save_to_dirs = (grid and opts.grid_save_to_dirs) or (not grid and opts.save_to_dirs and not no_prompt)
if save_to_dirs:
dirname = namegen.apply(opts.directories_filename_pattern or "[prompt_words]").lstrip(' ').rstrip('\\ /')
path = truncate_fullpath(os.path.join(path, dirname))
os.makedirs(path, exist_ok=True)
if forced_filename is None:
if short_filename or seed is None:
file_decoration = ""
elif opts.save_to_dirs:
file_decoration = opts.samples_filename_pattern or "[seed]"
else:
file_decoration = opts.samples_filename_pattern or "[seed]-[prompt_spaces]"
add_number = opts.save_images_add_number or file_decoration == ''
if file_decoration != "" and add_number:
file_decoration = "-" + file_decoration
file_decoration = namegen.apply(file_decoration) + suffix
if add_number:
basecount = get_next_sequence_number(path, basename)
fullfn = None
for i in range(500):
fn = f"{basecount + i:05}" if basename == '' else f"{basename}-{basecount + i:04}"
fullfn = truncate_fullpath(os.path.join(path, f"{fn}{file_decoration}.{extension}"))
if not os.path.exists(fullfn):
break
else:
fullfn = truncate_fullpath(os.path.join(path, f"{file_decoration}.{extension}"))
else:
fullfn = truncate_fullpath(os.path.join(path, f"{forced_filename}.{extension}"))
pnginfo = existing_info or {}
if info is not None:
pnginfo[pnginfo_section_name] = info
params = script_callbacks.ImageSaveParams(image, p, fullfn, pnginfo)
script_callbacks.before_image_saved_callback(params)
image = params.image
fullfn = params.filename
info = params.pnginfo.get(pnginfo_section_name, None)
fullfn_without_extension, extension = os.path.splitext(params.filename)
def exif_bytes():
return piexif.dump({
"Exif": {
piexif.ExifIFD.UserComment: piexif.helper.UserComment.dump(info or "", encoding="unicode")
},
})
if extension.lower() == '.png':
pnginfo_data = PngImagePlugin.PngInfo()
for k, v in params.pnginfo.items():
pnginfo_data.add_text(k, str(v))
image.save(fullfn, quality=opts.jpeg_quality, pnginfo=pnginfo_data)
elif extension.lower() in (".jpg", ".jpeg", ".webp"):
image.save(fullfn, quality=opts.jpeg_quality)
if opts.enable_pnginfo and info is not None:
piexif.insert(exif_bytes(), fullfn)
else:
image.save(fullfn, quality=opts.jpeg_quality)
target_side_length = 4000
oversize = image.width > target_side_length or image.height > target_side_length
if opts.export_for_4chan and (oversize or os.stat(fullfn).st_size > 4 * 1024 * 1024):
ratio = image.width / image.height
if oversize and ratio > 1:
image = image.resize((target_side_length, image.height * target_side_length // image.width), LANCZOS)
elif oversize:
image = image.resize((image.width * target_side_length // image.height, target_side_length), LANCZOS)
image.save(fullfn_without_extension + ".jpg", quality=opts.jpeg_quality)
if opts.enable_pnginfo and info is not None:
piexif.insert(exif_bytes(), fullfn_without_extension + ".jpg")
if opts.save_txt and info is not None:
txt_fullfn = f"{fullfn_without_extension}.txt"
with open(txt_fullfn, "w", encoding="utf8") as file:
file.write(info + "\n")
else:
txt_fullfn = None
script_callbacks.image_saved_callback(params)
return fullfn, txt_fullfn
def image_data(data):
try:
image = Image.open(io.BytesIO(data))
textinfo = image.text["parameters"]
return textinfo, None
except Exception:
pass
try:
text = data.decode('utf8')
assert len(text) < 10000
return text, None
except Exception:
pass
return '', None
<|code_end|>
|
[Bug]: loss is logged after 1 step, and logging (all) is off by 1 step
### Is there an existing issue for this?
- [X] I have searched the existing issues and checked the recent builds/commits
### What happened?
1. Logging is off by 1 step, for the loss CSV, model, and previews
I added a counter that increments after optimizer.step(). I set training to log every 10 steps, but it actually logs after 11.

This happens because step is defined by i (+ initial_steps). i indicates the current step and is 0-indexed; it does not indicate the count of steps already done. But the logging check is step % save_hypernetwork_every == 0, so when it logs at step=10, it has actually done 11 steps (0-10).
2. Loss logging is done after 1 step.
The loss log in the CSV always starts at step 1. This happens because it uses step % training_write_csv_every while step is 0-indexed, as mentioned above, and then logs step+1, so every subsequent log entry is off by one step. It also doesn't check whether step > 0.
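For illustration, a toy sketch of the off-by-one described above (variable names are illustrative, not the repository's):
```python
save_every = 10
steps_done = 0

for i in range(30):  # i is the 0-indexed "current step"
    # ... one optimizer.step() happens here ...
    steps_done += 1

    # Buggy check: fires at i == 0 (after 1 step) and at i == 10 (after 11 steps).
    logs_now_buggy = (i % save_every == 0)

    # Possible fix: test the count of completed steps instead.
    logs_now_fixed = (steps_done % save_every == 0)
```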
### Steps to reproduce the problem
1. Set a value (10 here) for csv log in settings
2. Set a value (10 here) for model and preview logs
3. Train a hypernetwork or textual embedding
### What should have happened?
Logging should be done every n steps, as set by the user.
### Commit where the problem happens
737eb28
### What platforms do you use to access UI ?
Other/Cloud
### What browsers do you use to access the UI ?
Google Chrome
### Command Line Arguments
```Shell
--autolaunch --ckpt {workspace3}/novelai/final_pruned.ckpt --deepdanbooru --disable-safe-unpickle --no-half-vae
```
### Additional information, context and logs
I'll try
|
modules/hypernetworks/hypernetwork.py
<|code_start|>import csv
import datetime
import glob
import html
import os
import sys
import traceback
import inspect
import modules.textual_inversion.dataset
import torch
import tqdm
from einops import rearrange, repeat
from ldm.util import default
from modules import devices, processing, sd_models, shared
from modules.textual_inversion import textual_inversion
from modules.textual_inversion.learn_schedule import LearnRateScheduler
from torch import einsum
from torch.nn.init import normal_, xavier_normal_, xavier_uniform_, kaiming_normal_, kaiming_uniform_, zeros_
from collections import defaultdict, deque
from statistics import stdev, mean
class HypernetworkModule(torch.nn.Module):
multiplier = 1.0
activation_dict = {
"relu": torch.nn.ReLU,
"leakyrelu": torch.nn.LeakyReLU,
"elu": torch.nn.ELU,
"swish": torch.nn.Hardswish,
"tanh": torch.nn.Tanh,
"sigmoid": torch.nn.Sigmoid,
}
activation_dict.update({cls_name.lower(): cls_obj for cls_name, cls_obj in inspect.getmembers(torch.nn.modules.activation) if inspect.isclass(cls_obj) and cls_obj.__module__ == 'torch.nn.modules.activation'})
def __init__(self, dim, state_dict=None, layer_structure=None, activation_func=None, weight_init='Normal', add_layer_norm=False, use_dropout=False):
super().__init__()
assert layer_structure is not None, "layer_structure must not be None"
assert layer_structure[0] == 1, "Multiplier Sequence should start with size 1!"
assert layer_structure[-1] == 1, "Multiplier Sequence should end with size 1!"
linears = []
for i in range(len(layer_structure) - 1):
# Add a fully-connected layer
linears.append(torch.nn.Linear(int(dim * layer_structure[i]), int(dim * layer_structure[i+1])))
# Add an activation func
if activation_func == "linear" or activation_func is None:
pass
elif activation_func in self.activation_dict:
linears.append(self.activation_dict[activation_func]())
else:
raise RuntimeError(f'hypernetwork uses an unsupported activation function: {activation_func}')
# Add layer normalization
if add_layer_norm:
linears.append(torch.nn.LayerNorm(int(dim * layer_structure[i+1])))
# Add dropout expect last layer
if use_dropout and i < len(layer_structure) - 3:
linears.append(torch.nn.Dropout(p=0.3))
self.linear = torch.nn.Sequential(*linears)
if state_dict is not None:
self.fix_old_state_dict(state_dict)
self.load_state_dict(state_dict)
else:
for layer in self.linear:
if type(layer) == torch.nn.Linear or type(layer) == torch.nn.LayerNorm:
w, b = layer.weight.data, layer.bias.data
if weight_init == "Normal" or type(layer) == torch.nn.LayerNorm:
normal_(w, mean=0.0, std=0.01)
normal_(b, mean=0.0, std=0.005)
elif weight_init == 'XavierUniform':
xavier_uniform_(w)
zeros_(b)
elif weight_init == 'XavierNormal':
xavier_normal_(w)
zeros_(b)
elif weight_init == 'KaimingUniform':
kaiming_uniform_(w, nonlinearity='leaky_relu' if 'leakyrelu' == activation_func else 'relu')
zeros_(b)
elif weight_init == 'KaimingNormal':
kaiming_normal_(w, nonlinearity='leaky_relu' if 'leakyrelu' == activation_func else 'relu')
zeros_(b)
else:
raise KeyError(f"Key {weight_init} is not defined as initialization!")
self.to(devices.device)
def fix_old_state_dict(self, state_dict):
changes = {
'linear1.bias': 'linear.0.bias',
'linear1.weight': 'linear.0.weight',
'linear2.bias': 'linear.1.bias',
'linear2.weight': 'linear.1.weight',
}
for fr, to in changes.items():
x = state_dict.get(fr, None)
if x is None:
continue
del state_dict[fr]
state_dict[to] = x
def forward(self, x):
return x + self.linear(x) * self.multiplier
def trainables(self):
layer_structure = []
for layer in self.linear:
if type(layer) == torch.nn.Linear or type(layer) == torch.nn.LayerNorm:
layer_structure += [layer.weight, layer.bias]
return layer_structure
def apply_strength(value=None):
HypernetworkModule.multiplier = value if value is not None else shared.opts.sd_hypernetwork_strength
class Hypernetwork:
filename = None
name = None
def __init__(self, name=None, enable_sizes=None, layer_structure=None, activation_func=None, weight_init=None, add_layer_norm=False, use_dropout=False):
self.filename = None
self.name = name
self.layers = {}
self.step = 0
self.sd_checkpoint = None
self.sd_checkpoint_name = None
self.layer_structure = layer_structure
self.activation_func = activation_func
self.weight_init = weight_init
self.add_layer_norm = add_layer_norm
self.use_dropout = use_dropout
for size in enable_sizes or []:
self.layers[size] = (
HypernetworkModule(size, None, self.layer_structure, self.activation_func, self.weight_init, self.add_layer_norm, self.use_dropout),
HypernetworkModule(size, None, self.layer_structure, self.activation_func, self.weight_init, self.add_layer_norm, self.use_dropout),
)
def weights(self):
res = []
for k, layers in self.layers.items():
for layer in layers:
layer.train()
res += layer.trainables()
return res
def save(self, filename):
state_dict = {}
for k, v in self.layers.items():
state_dict[k] = (v[0].state_dict(), v[1].state_dict())
state_dict['step'] = self.step
state_dict['name'] = self.name
state_dict['layer_structure'] = self.layer_structure
state_dict['activation_func'] = self.activation_func
state_dict['is_layer_norm'] = self.add_layer_norm
state_dict['weight_initialization'] = self.weight_init
state_dict['use_dropout'] = self.use_dropout
state_dict['sd_checkpoint'] = self.sd_checkpoint
state_dict['sd_checkpoint_name'] = self.sd_checkpoint_name
torch.save(state_dict, filename)
def load(self, filename):
self.filename = filename
if self.name is None:
self.name = os.path.splitext(os.path.basename(filename))[0]
state_dict = torch.load(filename, map_location='cpu')
self.layer_structure = state_dict.get('layer_structure', [1, 2, 1])
print(self.layer_structure)
self.activation_func = state_dict.get('activation_func', None)
print(f"Activation function is {self.activation_func}")
self.weight_init = state_dict.get('weight_initialization', 'Normal')
print(f"Weight initialization is {self.weight_init}")
self.add_layer_norm = state_dict.get('is_layer_norm', False)
print(f"Layer norm is set to {self.add_layer_norm}")
self.use_dropout = state_dict.get('use_dropout', False)
print(f"Dropout usage is set to {self.use_dropout}" )
for size, sd in state_dict.items():
if type(size) == int:
self.layers[size] = (
HypernetworkModule(size, sd[0], self.layer_structure, self.activation_func, self.weight_init, self.add_layer_norm, self.use_dropout),
HypernetworkModule(size, sd[1], self.layer_structure, self.activation_func, self.weight_init, self.add_layer_norm, self.use_dropout),
)
self.name = state_dict.get('name', self.name)
self.step = state_dict.get('step', 0)
self.sd_checkpoint = state_dict.get('sd_checkpoint', None)
self.sd_checkpoint_name = state_dict.get('sd_checkpoint_name', None)
def list_hypernetworks(path):
res = {}
for filename in glob.iglob(os.path.join(path, '**/*.pt'), recursive=True):
name = os.path.splitext(os.path.basename(filename))[0]
res[name] = filename
return res
def load_hypernetwork(filename):
path = shared.hypernetworks.get(filename, None)
if path is not None:
print(f"Loading hypernetwork {filename}")
try:
shared.loaded_hypernetwork = Hypernetwork()
shared.loaded_hypernetwork.load(path)
except Exception:
print(f"Error loading hypernetwork {path}", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
else:
if shared.loaded_hypernetwork is not None:
print(f"Unloading hypernetwork")
shared.loaded_hypernetwork = None
def find_closest_hypernetwork_name(search: str):
if not search:
return None
search = search.lower()
applicable = [name for name in shared.hypernetworks if search in name.lower()]
if not applicable:
return None
applicable = sorted(applicable, key=lambda name: len(name))
return applicable[0]
def apply_hypernetwork(hypernetwork, context, layer=None):
hypernetwork_layers = (hypernetwork.layers if hypernetwork is not None else {}).get(context.shape[2], None)
if hypernetwork_layers is None:
return context, context
if layer is not None:
layer.hyper_k = hypernetwork_layers[0]
layer.hyper_v = hypernetwork_layers[1]
context_k = hypernetwork_layers[0](context)
context_v = hypernetwork_layers[1](context)
return context_k, context_v
def attention_CrossAttention_forward(self, x, context=None, mask=None):
h = self.heads
q = self.to_q(x)
context = default(context, x)
context_k, context_v = apply_hypernetwork(shared.loaded_hypernetwork, context, self)
k = self.to_k(context_k)
v = self.to_v(context_v)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))
sim = einsum('b i d, b j d -> b i j', q, k) * self.scale
if mask is not None:
mask = rearrange(mask, 'b ... -> b (...)')
max_neg_value = -torch.finfo(sim.dtype).max
mask = repeat(mask, 'b j -> (b h) () j', h=h)
sim.masked_fill_(~mask, max_neg_value)
# attention, what we cannot get enough of
attn = sim.softmax(dim=-1)
out = einsum('b i j, b j d -> b i d', attn, v)
out = rearrange(out, '(b h) n d -> b n (h d)', h=h)
return self.to_out(out)
def stack_conds(conds):
if len(conds) == 1:
return torch.stack(conds)
# same as in reconstruct_multicond_batch
token_count = max([x.shape[0] for x in conds])
for i in range(len(conds)):
if conds[i].shape[0] != token_count:
last_vector = conds[i][-1:]
last_vector_repeated = last_vector.repeat([token_count - conds[i].shape[0], 1])
conds[i] = torch.vstack([conds[i], last_vector_repeated])
return torch.stack(conds)
def statistics(data):
if len(data) < 2:
std = 0
else:
std = stdev(data)
total_information = f"loss:{mean(data):.3f}" + u"\u00B1" + f"({std/ (len(data) ** 0.5):.3f})"
recent_data = data[-32:]
if len(recent_data) < 2:
std = 0
else:
std = stdev(recent_data)
recent_information = f"recent 32 loss:{mean(recent_data):.3f}" + u"\u00B1" + f"({std / (len(recent_data) ** 0.5):.3f})"
return total_information, recent_information
def report_statistics(loss_info:dict):
keys = sorted(loss_info.keys(), key=lambda x: sum(loss_info[x]) / len(loss_info[x]))
for key in keys:
try:
print("Loss statistics for file " + key)
info, recent = statistics(list(loss_info[key]))
print(info)
print(recent)
except Exception as e:
print(e)
def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, create_image_every, save_hypernetwork_every, template_file, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height):
# images allows training previews to have infotext. Importing it at the top causes a circular import problem.
from modules import images
assert hypernetwork_name, 'hypernetwork not selected'
path = shared.hypernetworks.get(hypernetwork_name, None)
shared.loaded_hypernetwork = Hypernetwork()
shared.loaded_hypernetwork.load(path)
shared.state.textinfo = "Initializing hypernetwork training..."
shared.state.job_count = steps
filename = os.path.join(shared.cmd_opts.hypernetwork_dir, f'{hypernetwork_name}.pt')
log_directory = os.path.join(log_directory, datetime.datetime.now().strftime("%Y-%m-%d"), hypernetwork_name)
unload = shared.opts.unload_models_when_training
if save_hypernetwork_every > 0:
hypernetwork_dir = os.path.join(log_directory, "hypernetworks")
os.makedirs(hypernetwork_dir, exist_ok=True)
else:
hypernetwork_dir = None
if create_image_every > 0:
images_dir = os.path.join(log_directory, "images")
os.makedirs(images_dir, exist_ok=True)
else:
images_dir = None
shared.state.textinfo = f"Preparing dataset from {html.escape(data_root)}..."
with torch.autocast("cuda"):
ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=hypernetwork_name, model=shared.sd_model, device=devices.device, template_file=template_file, include_cond=True, batch_size=batch_size)
if unload:
shared.sd_model.cond_stage_model.to(devices.cpu)
shared.sd_model.first_stage_model.to(devices.cpu)
hypernetwork = shared.loaded_hypernetwork
weights = hypernetwork.weights()
for weight in weights:
weight.requires_grad = True
size = len(ds.indexes)
loss_dict = defaultdict(lambda : deque(maxlen = 1024))
losses = torch.zeros((size,))
previous_mean_losses = [0]
previous_mean_loss = 0
print("Mean loss of {} elements".format(size))
last_saved_file = "<none>"
last_saved_image = "<none>"
forced_filename = "<none>"
ititial_step = hypernetwork.step or 0
if ititial_step > steps:
return hypernetwork, filename
scheduler = LearnRateScheduler(learn_rate, steps, ititial_step)
# if optimizer == "AdamW": or else Adam / AdamW / SGD, etc...
optimizer = torch.optim.AdamW(weights, lr=scheduler.learn_rate)
steps_without_grad = 0
pbar = tqdm.tqdm(enumerate(ds), total=steps - ititial_step)
for i, entries in pbar:
hypernetwork.step = i + ititial_step
if len(loss_dict) > 0:
previous_mean_losses = [i[-1] for i in loss_dict.values()]
previous_mean_loss = mean(previous_mean_losses)
scheduler.apply(optimizer, hypernetwork.step)
if scheduler.finished:
break
if shared.state.interrupted:
break
with torch.autocast("cuda"):
c = stack_conds([entry.cond for entry in entries]).to(devices.device)
# c = torch.vstack([entry.cond for entry in entries]).to(devices.device)
x = torch.stack([entry.latent for entry in entries]).to(devices.device)
loss = shared.sd_model(x, c)[0]
del x
del c
losses[hypernetwork.step % losses.shape[0]] = loss.item()
for entry in entries:
loss_dict[entry.filename].append(loss.item())
optimizer.zero_grad()
weights[0].grad = None
loss.backward()
if weights[0].grad is None:
steps_without_grad += 1
else:
steps_without_grad = 0
assert steps_without_grad < 10, 'no gradient found for the trained weight after backward() for 10 steps in a row; this is a bug; training cannot continue'
optimizer.step()
if torch.isnan(losses[hypernetwork.step % losses.shape[0]]):
raise RuntimeError("Loss diverged.")
if len(previous_mean_losses) > 1:
std = stdev(previous_mean_losses)
else:
std = 0
dataset_loss_info = f"dataset loss:{mean(previous_mean_losses):.3f}" + u"\u00B1" + f"({std / (len(previous_mean_losses) ** 0.5):.3f})"
pbar.set_description(dataset_loss_info)
if hypernetwork.step > 0 and hypernetwork_dir is not None and hypernetwork.step % save_hypernetwork_every == 0:
# Before saving, change name to match current checkpoint.
hypernetwork.name = f'{hypernetwork_name}-{hypernetwork.step}'
last_saved_file = os.path.join(hypernetwork_dir, f'{hypernetwork.name}.pt')
hypernetwork.save(last_saved_file)
textual_inversion.write_loss(log_directory, "hypernetwork_loss.csv", hypernetwork.step, len(ds), {
"loss": f"{previous_mean_loss:.7f}",
"learn_rate": scheduler.learn_rate
})
if hypernetwork.step > 0 and images_dir is not None and hypernetwork.step % create_image_every == 0:
forced_filename = f'{hypernetwork_name}-{hypernetwork.step}'
last_saved_image = os.path.join(images_dir, forced_filename)
optimizer.zero_grad()
shared.sd_model.cond_stage_model.to(devices.device)
shared.sd_model.first_stage_model.to(devices.device)
p = processing.StableDiffusionProcessingTxt2Img(
sd_model=shared.sd_model,
do_not_save_grid=True,
do_not_save_samples=True,
)
if preview_from_txt2img:
p.prompt = preview_prompt
p.negative_prompt = preview_negative_prompt
p.steps = preview_steps
p.sampler_index = preview_sampler_index
p.cfg_scale = preview_cfg_scale
p.seed = preview_seed
p.width = preview_width
p.height = preview_height
else:
p.prompt = entries[0].cond_text
p.steps = 20
preview_text = p.prompt
processed = processing.process_images(p)
image = processed.images[0] if len(processed.images)>0 else None
if unload:
shared.sd_model.cond_stage_model.to(devices.cpu)
shared.sd_model.first_stage_model.to(devices.cpu)
if image is not None:
shared.state.current_image = image
last_saved_image, last_text_info = images.save_image(image, images_dir, "", p.seed, p.prompt, shared.opts.samples_format, processed.infotexts[0], p=p, forced_filename=forced_filename, save_to_dirs=False)
last_saved_image += f", prompt: {preview_text}"
shared.state.job_no = hypernetwork.step
shared.state.textinfo = f"""
<p>
Loss: {previous_mean_loss:.7f}<br/>
Step: {hypernetwork.step}<br/>
Last prompt: {html.escape(entries[0].cond_text)}<br/>
Last saved hypernetwork: {html.escape(last_saved_file)}<br/>
Last saved image: {html.escape(last_saved_image)}<br/>
</p>
"""
report_statistics(loss_dict)
checkpoint = sd_models.select_checkpoint()
hypernetwork.sd_checkpoint = checkpoint.hash
hypernetwork.sd_checkpoint_name = checkpoint.model_name
# Before saving for the last time, change name back to the base name (as opposed to the save_hypernetwork_every step-suffixed naming convention).
hypernetwork.name = hypernetwork_name
filename = os.path.join(shared.cmd_opts.hypernetwork_dir, f'{hypernetwork.name}.pt')
hypernetwork.save(filename)
return hypernetwork, filename
<|code_end|>
modules/textual_inversion/learn_schedule.py
<|code_start|>import tqdm
class LearnScheduleIterator:
def __init__(self, learn_rate, max_steps, cur_step=0):
"""
specify learn_rate as "0.001:100, 0.00001:1000, 1e-5:10000" to have lr of 0.001 until step 100, 0.00001 until 1000, 1e-5:10000 until 10000
"""
pairs = learn_rate.split(',')
self.rates = []
self.it = 0
self.maxit = 0
for i, pair in enumerate(pairs):
tmp = pair.split(':')
if len(tmp) == 2:
step = int(tmp[1])
if step > cur_step:
self.rates.append((float(tmp[0]), min(step, max_steps)))
self.maxit += 1
if step > max_steps:
return
elif step == -1:
self.rates.append((float(tmp[0]), max_steps))
self.maxit += 1
return
else:
self.rates.append((float(tmp[0]), max_steps))
self.maxit += 1
return
def __iter__(self):
return self
def __next__(self):
if self.it < self.maxit:
self.it += 1
return self.rates[self.it - 1]
else:
raise StopIteration
class LearnRateScheduler:
def __init__(self, learn_rate, max_steps, cur_step=0, verbose=True):
self.schedules = LearnScheduleIterator(learn_rate, max_steps, cur_step)
(self.learn_rate, self.end_step) = next(self.schedules)
self.verbose = verbose
if self.verbose:
print(f'Training at rate of {self.learn_rate} until step {self.end_step}')
self.finished = False
def apply(self, optimizer, step_number):
if step_number <= self.end_step:
return
try:
(self.learn_rate, self.end_step) = next(self.schedules)
except Exception:
self.finished = True
return
if self.verbose:
tqdm.tqdm.write(f'Training at rate of {self.learn_rate} until step {self.end_step}')
for pg in optimizer.param_groups:
pg['lr'] = self.learn_rate
<|code_end|>
modules/textual_inversion/textual_inversion.py
<|code_start|>import os
import sys
import traceback
import torch
import tqdm
import html
import datetime
import csv
from PIL import Image, PngImagePlugin
from modules import shared, devices, sd_hijack, processing, sd_models, images
import modules.textual_inversion.dataset
from modules.textual_inversion.learn_schedule import LearnRateScheduler
from modules.textual_inversion.image_embedding import (embedding_to_b64, embedding_from_b64,
insert_image_data_embed, extract_image_data_embed,
caption_image_overlay)
class Embedding:
def __init__(self, vec, name, step=None):
self.vec = vec
self.name = name
self.step = step
self.cached_checksum = None
self.sd_checkpoint = None
self.sd_checkpoint_name = None
def save(self, filename):
embedding_data = {
"string_to_token": {"*": 265},
"string_to_param": {"*": self.vec},
"name": self.name,
"step": self.step,
"sd_checkpoint": self.sd_checkpoint,
"sd_checkpoint_name": self.sd_checkpoint_name,
}
torch.save(embedding_data, filename)
def checksum(self):
if self.cached_checksum is not None:
return self.cached_checksum
def const_hash(a):
r = 0
for v in a:
r = (r * 281 ^ int(v) * 997) & 0xFFFFFFFF
return r
self.cached_checksum = f'{const_hash(self.vec.reshape(-1) * 100) & 0xffff:04x}'
return self.cached_checksum
class EmbeddingDatabase:
def __init__(self, embeddings_dir):
self.ids_lookup = {}
self.word_embeddings = {}
self.dir_mtime = None
self.embeddings_dir = embeddings_dir
def register_embedding(self, embedding, model):
self.word_embeddings[embedding.name] = embedding
ids = model.cond_stage_model.tokenizer([embedding.name], add_special_tokens=False)['input_ids'][0]
first_id = ids[0]
if first_id not in self.ids_lookup:
self.ids_lookup[first_id] = []
self.ids_lookup[first_id] = sorted(self.ids_lookup[first_id] + [(ids, embedding)], key=lambda x: len(x[0]), reverse=True)
return embedding
def load_textual_inversion_embeddings(self):
mt = os.path.getmtime(self.embeddings_dir)
if self.dir_mtime is not None and mt <= self.dir_mtime:
return
self.dir_mtime = mt
self.ids_lookup.clear()
self.word_embeddings.clear()
def process_file(path, filename):
name = os.path.splitext(filename)[0]
data = []
if os.path.splitext(filename.upper())[-1] in ['.PNG', '.WEBP', '.JXL', '.AVIF']:
embed_image = Image.open(path)
if hasattr(embed_image, 'text') and 'sd-ti-embedding' in embed_image.text:
data = embedding_from_b64(embed_image.text['sd-ti-embedding'])
name = data.get('name', name)
else:
data = extract_image_data_embed(embed_image)
name = data.get('name', name)
else:
data = torch.load(path, map_location="cpu")
# textual inversion embeddings
if 'string_to_param' in data:
param_dict = data['string_to_param']
if hasattr(param_dict, '_parameters'):
param_dict = getattr(param_dict, '_parameters') # fix for torch 1.12.1 loading saved file from torch 1.11
assert len(param_dict) == 1, 'embedding file has multiple terms in it'
emb = next(iter(param_dict.items()))[1]
# diffuser concepts
elif type(data) == dict and type(next(iter(data.values()))) == torch.Tensor:
assert len(data.keys()) == 1, 'embedding file has multiple terms in it'
emb = next(iter(data.values()))
if len(emb.shape) == 1:
emb = emb.unsqueeze(0)
else:
raise Exception(f"Couldn't identify {filename} as neither textual inversion embedding nor diffuser concept.")
vec = emb.detach().to(devices.device, dtype=torch.float32)
embedding = Embedding(vec, name)
embedding.step = data.get('step', None)
embedding.sd_checkpoint = data.get('hash', None)
embedding.sd_checkpoint_name = data.get('sd_checkpoint_name', None)
self.register_embedding(embedding, shared.sd_model)
for fn in os.listdir(self.embeddings_dir):
try:
fullfn = os.path.join(self.embeddings_dir, fn)
if os.stat(fullfn).st_size == 0:
continue
process_file(fullfn, fn)
except Exception:
print(f"Error loading emedding {fn}:", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
continue
print(f"Loaded a total of {len(self.word_embeddings)} textual inversion embeddings.")
print("Embeddings:", ', '.join(self.word_embeddings.keys()))
def find_embedding_at_position(self, tokens, offset):
token = tokens[offset]
possible_matches = self.ids_lookup.get(token, None)
if possible_matches is None:
return None, None
for ids, embedding in possible_matches:
if tokens[offset:offset + len(ids)] == ids:
return embedding, len(ids)
return None, None
def create_embedding(name, num_vectors_per_token, overwrite_old, init_text='*'):
cond_model = shared.sd_model.cond_stage_model
embedding_layer = cond_model.wrapped.transformer.text_model.embeddings
with devices.autocast():
cond_model([""]) # will send cond model to GPU if lowvram/medvram is active
ids = cond_model.tokenizer(init_text, max_length=num_vectors_per_token, return_tensors="pt", add_special_tokens=False)["input_ids"]
embedded = embedding_layer.token_embedding.wrapped(ids.to(devices.device)).squeeze(0)
vec = torch.zeros((num_vectors_per_token, embedded.shape[1]), device=devices.device)
for i in range(num_vectors_per_token):
vec[i] = embedded[i * int(embedded.shape[0]) // num_vectors_per_token]
# Remove illegal characters from name.
name = "".join( x for x in name if (x.isalnum() or x in "._- "))
fn = os.path.join(shared.cmd_opts.embeddings_dir, f"{name}.pt")
if not overwrite_old:
assert not os.path.exists(fn), f"file {fn} already exists"
embedding = Embedding(vec, name)
embedding.step = 0
embedding.save(fn)
return fn
def write_loss(log_directory, filename, step, epoch_len, values):
if shared.opts.training_write_csv_every == 0:
return
if step % shared.opts.training_write_csv_every != 0:
return
write_csv_header = False if os.path.exists(os.path.join(log_directory, filename)) else True
with open(os.path.join(log_directory, filename), "a+", newline='') as fout:
csv_writer = csv.DictWriter(fout, fieldnames=["step", "epoch", "epoch_step", *(values.keys())])
if write_csv_header:
csv_writer.writeheader()
epoch = step // epoch_len
epoch_step = step - epoch * epoch_len
csv_writer.writerow({
"step": step + 1,
"epoch": epoch + 1,
"epoch_step": epoch_step + 1,
**values,
})
def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, create_image_every, save_embedding_every, template_file, save_image_with_stored_embedding, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height):
assert embedding_name, 'embedding not selected'
shared.state.textinfo = "Initializing textual inversion training..."
shared.state.job_count = steps
filename = os.path.join(shared.cmd_opts.embeddings_dir, f'{embedding_name}.pt')
log_directory = os.path.join(log_directory, datetime.datetime.now().strftime("%Y-%m-%d"), embedding_name)
if save_embedding_every > 0:
embedding_dir = os.path.join(log_directory, "embeddings")
os.makedirs(embedding_dir, exist_ok=True)
else:
embedding_dir = None
if create_image_every > 0:
images_dir = os.path.join(log_directory, "images")
os.makedirs(images_dir, exist_ok=True)
else:
images_dir = None
if create_image_every > 0 and save_image_with_stored_embedding:
images_embeds_dir = os.path.join(log_directory, "image_embeddings")
os.makedirs(images_embeds_dir, exist_ok=True)
else:
images_embeds_dir = None
cond_model = shared.sd_model.cond_stage_model
shared.state.textinfo = f"Preparing dataset from {html.escape(data_root)}..."
with torch.autocast("cuda"):
ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=embedding_name, model=shared.sd_model, device=devices.device, template_file=template_file, batch_size=batch_size)
hijack = sd_hijack.model_hijack
embedding = hijack.embedding_db.word_embeddings[embedding_name]
embedding.vec.requires_grad = True
losses = torch.zeros((32,))
last_saved_file = "<none>"
last_saved_image = "<none>"
forced_filename = "<none>"
embedding_yet_to_be_embedded = False
ititial_step = embedding.step or 0
if ititial_step > steps:
return embedding, filename
scheduler = LearnRateScheduler(learn_rate, steps, ititial_step)
optimizer = torch.optim.AdamW([embedding.vec], lr=scheduler.learn_rate)
pbar = tqdm.tqdm(enumerate(ds), total=steps-ititial_step)
for i, entries in pbar:
embedding.step = i + ititial_step
scheduler.apply(optimizer, embedding.step)
if scheduler.finished:
break
if shared.state.interrupted:
break
with torch.autocast("cuda"):
c = cond_model([entry.cond_text for entry in entries])
x = torch.stack([entry.latent for entry in entries]).to(devices.device)
loss = shared.sd_model(x, c)[0]
del x
losses[embedding.step % losses.shape[0]] = loss.item()
optimizer.zero_grad()
loss.backward()
optimizer.step()
epoch_num = embedding.step // len(ds)
epoch_step = embedding.step - (epoch_num * len(ds)) + 1
pbar.set_description(f"[Epoch {epoch_num}: {epoch_step}/{len(ds)}]loss: {losses.mean():.7f}")
if embedding.step > 0 and embedding_dir is not None and embedding.step % save_embedding_every == 0:
# Before saving, change name to match current checkpoint.
embedding.name = f'{embedding_name}-{embedding.step}'
last_saved_file = os.path.join(embedding_dir, f'{embedding.name}.pt')
embedding.save(last_saved_file)
embedding_yet_to_be_embedded = True
write_loss(log_directory, "textual_inversion_loss.csv", embedding.step, len(ds), {
"loss": f"{losses.mean():.7f}",
"learn_rate": scheduler.learn_rate
})
if embedding.step > 0 and images_dir is not None and embedding.step % create_image_every == 0:
forced_filename = f'{embedding_name}-{embedding.step}'
last_saved_image = os.path.join(images_dir, forced_filename)
p = processing.StableDiffusionProcessingTxt2Img(
sd_model=shared.sd_model,
do_not_save_grid=True,
do_not_save_samples=True,
do_not_reload_embeddings=True,
)
if preview_from_txt2img:
p.prompt = preview_prompt
p.negative_prompt = preview_negative_prompt
p.steps = preview_steps
p.sampler_index = preview_sampler_index
p.cfg_scale = preview_cfg_scale
p.seed = preview_seed
p.width = preview_width
p.height = preview_height
else:
p.prompt = entries[0].cond_text
p.steps = 20
p.width = training_width
p.height = training_height
preview_text = p.prompt
processed = processing.process_images(p)
image = processed.images[0]
shared.state.current_image = image
if save_image_with_stored_embedding and os.path.exists(last_saved_file) and embedding_yet_to_be_embedded:
last_saved_image_chunks = os.path.join(images_embeds_dir, f'{embedding_name}-{embedding.step}.png')
info = PngImagePlugin.PngInfo()
data = torch.load(last_saved_file)
info.add_text("sd-ti-embedding", embedding_to_b64(data))
title = "<{}>".format(data.get('name', '???'))
try:
vectorSize = list(data['string_to_param'].values())[0].shape[0]
except Exception as e:
vectorSize = '?'
checkpoint = sd_models.select_checkpoint()
footer_left = checkpoint.model_name
footer_mid = '[{}]'.format(checkpoint.hash)
footer_right = '{}v {}s'.format(vectorSize, embedding.step)
captioned_image = caption_image_overlay(image, title, footer_left, footer_mid, footer_right)
captioned_image = insert_image_data_embed(captioned_image, data)
captioned_image.save(last_saved_image_chunks, "PNG", pnginfo=info)
embedding_yet_to_be_embedded = False
last_saved_image, last_text_info = images.save_image(image, images_dir, "", p.seed, p.prompt, shared.opts.samples_format, processed.infotexts[0], p=p, forced_filename=forced_filename, save_to_dirs=False)
last_saved_image += f", prompt: {preview_text}"
shared.state.job_no = embedding.step
shared.state.textinfo = f"""
<p>
Loss: {losses.mean():.7f}<br/>
Step: {embedding.step}<br/>
Last prompt: {html.escape(entries[0].cond_text)}<br/>
Last saved embedding: {html.escape(last_saved_file)}<br/>
Last saved image: {html.escape(last_saved_image)}<br/>
</p>
"""
checkpoint = sd_models.select_checkpoint()
embedding.sd_checkpoint = checkpoint.hash
embedding.sd_checkpoint_name = checkpoint.model_name
embedding.cached_checksum = None
# Before saving for the last time, change name back to base name (as opposed to the save_embedding_every step-suffixed naming convention).
embedding.name = embedding_name
filename = os.path.join(shared.cmd_opts.embeddings_dir, f'{embedding.name}.pt')
embedding.save(filename)
return embedding, filename
<|code_end|>
|
modules/hypernetworks/hypernetwork.py
<|code_start|>import csv
import datetime
import glob
import html
import os
import sys
import traceback
import inspect
import modules.textual_inversion.dataset
import torch
import tqdm
from einops import rearrange, repeat
from ldm.util import default
from modules import devices, processing, sd_models, shared
from modules.textual_inversion import textual_inversion
from modules.textual_inversion.learn_schedule import LearnRateScheduler
from torch import einsum
from torch.nn.init import normal_, xavier_normal_, xavier_uniform_, kaiming_normal_, kaiming_uniform_, zeros_
from collections import defaultdict, deque
from statistics import stdev, mean
class HypernetworkModule(torch.nn.Module):
multiplier = 1.0
activation_dict = {
"relu": torch.nn.ReLU,
"leakyrelu": torch.nn.LeakyReLU,
"elu": torch.nn.ELU,
"swish": torch.nn.Hardswish,
"tanh": torch.nn.Tanh,
"sigmoid": torch.nn.Sigmoid,
}
activation_dict.update({cls_name.lower(): cls_obj for cls_name, cls_obj in inspect.getmembers(torch.nn.modules.activation) if inspect.isclass(cls_obj) and cls_obj.__module__ == 'torch.nn.modules.activation'})
def __init__(self, dim, state_dict=None, layer_structure=None, activation_func=None, weight_init='Normal', add_layer_norm=False, use_dropout=False):
super().__init__()
assert layer_structure is not None, "layer_structure must not be None"
assert layer_structure[0] == 1, "Multiplier Sequence should start with size 1!"
assert layer_structure[-1] == 1, "Multiplier Sequence should end with size 1!"
linears = []
for i in range(len(layer_structure) - 1):
# Add a fully-connected layer
linears.append(torch.nn.Linear(int(dim * layer_structure[i]), int(dim * layer_structure[i+1])))
# Add an activation func
if activation_func == "linear" or activation_func is None:
pass
elif activation_func in self.activation_dict:
linears.append(self.activation_dict[activation_func]())
else:
raise RuntimeError(f'hypernetwork uses an unsupported activation function: {activation_func}')
# Add layer normalization
if add_layer_norm:
linears.append(torch.nn.LayerNorm(int(dim * layer_structure[i+1])))
# Add dropout expect last layer
if use_dropout and i < len(layer_structure) - 3:
linears.append(torch.nn.Dropout(p=0.3))
self.linear = torch.nn.Sequential(*linears)
if state_dict is not None:
self.fix_old_state_dict(state_dict)
self.load_state_dict(state_dict)
else:
for layer in self.linear:
if type(layer) == torch.nn.Linear or type(layer) == torch.nn.LayerNorm:
w, b = layer.weight.data, layer.bias.data
if weight_init == "Normal" or type(layer) == torch.nn.LayerNorm:
normal_(w, mean=0.0, std=0.01)
normal_(b, mean=0.0, std=0.005)
elif weight_init == 'XavierUniform':
xavier_uniform_(w)
zeros_(b)
elif weight_init == 'XavierNormal':
xavier_normal_(w)
zeros_(b)
elif weight_init == 'KaimingUniform':
kaiming_uniform_(w, nonlinearity='leaky_relu' if 'leakyrelu' == activation_func else 'relu')
zeros_(b)
elif weight_init == 'KaimingNormal':
kaiming_normal_(w, nonlinearity='leaky_relu' if 'leakyrelu' == activation_func else 'relu')
zeros_(b)
else:
raise KeyError(f"Key {weight_init} is not defined as initialization!")
self.to(devices.device)
def fix_old_state_dict(self, state_dict):
changes = {
'linear1.bias': 'linear.0.bias',
'linear1.weight': 'linear.0.weight',
'linear2.bias': 'linear.1.bias',
'linear2.weight': 'linear.1.weight',
}
for fr, to in changes.items():
x = state_dict.get(fr, None)
if x is None:
continue
del state_dict[fr]
state_dict[to] = x
def forward(self, x):
return x + self.linear(x) * self.multiplier
def trainables(self):
layer_structure = []
for layer in self.linear:
if type(layer) == torch.nn.Linear or type(layer) == torch.nn.LayerNorm:
layer_structure += [layer.weight, layer.bias]
return layer_structure
def apply_strength(value=None):
HypernetworkModule.multiplier = value if value is not None else shared.opts.sd_hypernetwork_strength
class Hypernetwork:
filename = None
name = None
def __init__(self, name=None, enable_sizes=None, layer_structure=None, activation_func=None, weight_init=None, add_layer_norm=False, use_dropout=False):
self.filename = None
self.name = name
self.layers = {}
self.step = 0
self.sd_checkpoint = None
self.sd_checkpoint_name = None
self.layer_structure = layer_structure
self.activation_func = activation_func
self.weight_init = weight_init
self.add_layer_norm = add_layer_norm
self.use_dropout = use_dropout
for size in enable_sizes or []:
self.layers[size] = (
HypernetworkModule(size, None, self.layer_structure, self.activation_func, self.weight_init, self.add_layer_norm, self.use_dropout),
HypernetworkModule(size, None, self.layer_structure, self.activation_func, self.weight_init, self.add_layer_norm, self.use_dropout),
)
def weights(self):
res = []
for k, layers in self.layers.items():
for layer in layers:
layer.train()
res += layer.trainables()
return res
def save(self, filename):
state_dict = {}
for k, v in self.layers.items():
state_dict[k] = (v[0].state_dict(), v[1].state_dict())
state_dict['step'] = self.step
state_dict['name'] = self.name
state_dict['layer_structure'] = self.layer_structure
state_dict['activation_func'] = self.activation_func
state_dict['is_layer_norm'] = self.add_layer_norm
state_dict['weight_initialization'] = self.weight_init
state_dict['use_dropout'] = self.use_dropout
state_dict['sd_checkpoint'] = self.sd_checkpoint
state_dict['sd_checkpoint_name'] = self.sd_checkpoint_name
torch.save(state_dict, filename)
def load(self, filename):
self.filename = filename
if self.name is None:
self.name = os.path.splitext(os.path.basename(filename))[0]
state_dict = torch.load(filename, map_location='cpu')
self.layer_structure = state_dict.get('layer_structure', [1, 2, 1])
print(self.layer_structure)
self.activation_func = state_dict.get('activation_func', None)
print(f"Activation function is {self.activation_func}")
self.weight_init = state_dict.get('weight_initialization', 'Normal')
print(f"Weight initialization is {self.weight_init}")
self.add_layer_norm = state_dict.get('is_layer_norm', False)
print(f"Layer norm is set to {self.add_layer_norm}")
self.use_dropout = state_dict.get('use_dropout', False)
print(f"Dropout usage is set to {self.use_dropout}" )
for size, sd in state_dict.items():
if type(size) == int:
self.layers[size] = (
HypernetworkModule(size, sd[0], self.layer_structure, self.activation_func, self.weight_init, self.add_layer_norm, self.use_dropout),
HypernetworkModule(size, sd[1], self.layer_structure, self.activation_func, self.weight_init, self.add_layer_norm, self.use_dropout),
)
self.name = state_dict.get('name', self.name)
self.step = state_dict.get('step', 0)
self.sd_checkpoint = state_dict.get('sd_checkpoint', None)
self.sd_checkpoint_name = state_dict.get('sd_checkpoint_name', None)
def list_hypernetworks(path):
res = {}
for filename in glob.iglob(os.path.join(path, '**/*.pt'), recursive=True):
name = os.path.splitext(os.path.basename(filename))[0]
res[name] = filename
return res
def load_hypernetwork(filename):
path = shared.hypernetworks.get(filename, None)
if path is not None:
print(f"Loading hypernetwork {filename}")
try:
shared.loaded_hypernetwork = Hypernetwork()
shared.loaded_hypernetwork.load(path)
except Exception:
print(f"Error loading hypernetwork {path}", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
else:
if shared.loaded_hypernetwork is not None:
print(f"Unloading hypernetwork")
shared.loaded_hypernetwork = None
def find_closest_hypernetwork_name(search: str):
if not search:
return None
search = search.lower()
applicable = [name for name in shared.hypernetworks if search in name.lower()]
if not applicable:
return None
applicable = sorted(applicable, key=lambda name: len(name))
return applicable[0]
def apply_hypernetwork(hypernetwork, context, layer=None):
hypernetwork_layers = (hypernetwork.layers if hypernetwork is not None else {}).get(context.shape[2], None)
if hypernetwork_layers is None:
return context, context
if layer is not None:
layer.hyper_k = hypernetwork_layers[0]
layer.hyper_v = hypernetwork_layers[1]
context_k = hypernetwork_layers[0](context)
context_v = hypernetwork_layers[1](context)
return context_k, context_v
def attention_CrossAttention_forward(self, x, context=None, mask=None):
h = self.heads
q = self.to_q(x)
context = default(context, x)
context_k, context_v = apply_hypernetwork(shared.loaded_hypernetwork, context, self)
k = self.to_k(context_k)
v = self.to_v(context_v)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))
sim = einsum('b i d, b j d -> b i j', q, k) * self.scale
if mask is not None:
mask = rearrange(mask, 'b ... -> b (...)')
max_neg_value = -torch.finfo(sim.dtype).max
mask = repeat(mask, 'b j -> (b h) () j', h=h)
sim.masked_fill_(~mask, max_neg_value)
# attention, what we cannot get enough of
attn = sim.softmax(dim=-1)
out = einsum('b i j, b j d -> b i d', attn, v)
out = rearrange(out, '(b h) n d -> b n (h d)', h=h)
return self.to_out(out)
def stack_conds(conds):
if len(conds) == 1:
return torch.stack(conds)
# same as in reconstruct_multicond_batch
token_count = max([x.shape[0] for x in conds])
for i in range(len(conds)):
if conds[i].shape[0] != token_count:
last_vector = conds[i][-1:]
last_vector_repeated = last_vector.repeat([token_count - conds[i].shape[0], 1])
conds[i] = torch.vstack([conds[i], last_vector_repeated])
return torch.stack(conds)
def statistics(data):
if len(data) < 2:
std = 0
else:
std = stdev(data)
total_information = f"loss:{mean(data):.3f}" + u"\u00B1" + f"({std/ (len(data) ** 0.5):.3f})"
recent_data = data[-32:]
if len(recent_data) < 2:
std = 0
else:
std = stdev(recent_data)
recent_information = f"recent 32 loss:{mean(recent_data):.3f}" + u"\u00B1" + f"({std / (len(recent_data) ** 0.5):.3f})"
return total_information, recent_information
def report_statistics(loss_info:dict):
keys = sorted(loss_info.keys(), key=lambda x: sum(loss_info[x]) / len(loss_info[x]))
for key in keys:
try:
print("Loss statistics for file " + key)
info, recent = statistics(list(loss_info[key]))
print(info)
print(recent)
except Exception as e:
print(e)
def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, create_image_every, save_hypernetwork_every, template_file, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height):
# images allows training previews to have infotext. Importing it at the top causes a circular import problem.
from modules import images
assert hypernetwork_name, 'hypernetwork not selected'
path = shared.hypernetworks.get(hypernetwork_name, None)
shared.loaded_hypernetwork = Hypernetwork()
shared.loaded_hypernetwork.load(path)
shared.state.textinfo = "Initializing hypernetwork training..."
shared.state.job_count = steps
filename = os.path.join(shared.cmd_opts.hypernetwork_dir, f'{hypernetwork_name}.pt')
log_directory = os.path.join(log_directory, datetime.datetime.now().strftime("%Y-%m-%d"), hypernetwork_name)
unload = shared.opts.unload_models_when_training
if save_hypernetwork_every > 0:
hypernetwork_dir = os.path.join(log_directory, "hypernetworks")
os.makedirs(hypernetwork_dir, exist_ok=True)
else:
hypernetwork_dir = None
if create_image_every > 0:
images_dir = os.path.join(log_directory, "images")
os.makedirs(images_dir, exist_ok=True)
else:
images_dir = None
shared.state.textinfo = f"Preparing dataset from {html.escape(data_root)}..."
with torch.autocast("cuda"):
ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=hypernetwork_name, model=shared.sd_model, device=devices.device, template_file=template_file, include_cond=True, batch_size=batch_size)
if unload:
shared.sd_model.cond_stage_model.to(devices.cpu)
shared.sd_model.first_stage_model.to(devices.cpu)
hypernetwork = shared.loaded_hypernetwork
weights = hypernetwork.weights()
for weight in weights:
weight.requires_grad = True
size = len(ds.indexes)
loss_dict = defaultdict(lambda : deque(maxlen = 1024))
losses = torch.zeros((size,))
previous_mean_losses = [0]
previous_mean_loss = 0
print("Mean loss of {} elements".format(size))
last_saved_file = "<none>"
last_saved_image = "<none>"
forced_filename = "<none>"
ititial_step = hypernetwork.step or 0
if ititial_step > steps:
return hypernetwork, filename
scheduler = LearnRateScheduler(learn_rate, steps, ititial_step)
# if optimizer == "AdamW": or else Adam / AdamW / SGD, etc...
optimizer = torch.optim.AdamW(weights, lr=scheduler.learn_rate)
steps_without_grad = 0
pbar = tqdm.tqdm(enumerate(ds), total=steps - ititial_step)
for i, entries in pbar:
hypernetwork.step = i + ititial_step
if len(loss_dict) > 0:
previous_mean_losses = [i[-1] for i in loss_dict.values()]
previous_mean_loss = mean(previous_mean_losses)
scheduler.apply(optimizer, hypernetwork.step)
if scheduler.finished:
break
if shared.state.interrupted:
break
with torch.autocast("cuda"):
c = stack_conds([entry.cond for entry in entries]).to(devices.device)
# c = torch.vstack([entry.cond for entry in entries]).to(devices.device)
x = torch.stack([entry.latent for entry in entries]).to(devices.device)
loss = shared.sd_model(x, c)[0]
del x
del c
losses[hypernetwork.step % losses.shape[0]] = loss.item()
for entry in entries:
loss_dict[entry.filename].append(loss.item())
optimizer.zero_grad()
weights[0].grad = None
loss.backward()
if weights[0].grad is None:
steps_without_grad += 1
else:
steps_without_grad = 0
assert steps_without_grad < 10, 'no gradient found for the trained weight after backward() for 10 steps in a row; this is a bug; training cannot continue'
optimizer.step()
steps_done = hypernetwork.step + 1
if torch.isnan(losses[hypernetwork.step % losses.shape[0]]):
raise RuntimeError("Loss diverged.")
if len(previous_mean_losses) > 1:
std = stdev(previous_mean_losses)
else:
std = 0
dataset_loss_info = f"dataset loss:{mean(previous_mean_losses):.3f}" + u"\u00B1" + f"({std / (len(previous_mean_losses) ** 0.5):.3f})"
pbar.set_description(dataset_loss_info)
if hypernetwork_dir is not None and steps_done % save_hypernetwork_every == 0:
# Before saving, change name to match current checkpoint.
hypernetwork.name = f'{hypernetwork_name}-{steps_done}'
last_saved_file = os.path.join(hypernetwork_dir, f'{hypernetwork.name}.pt')
hypernetwork.save(last_saved_file)
textual_inversion.write_loss(log_directory, "hypernetwork_loss.csv", hypernetwork.step, len(ds), {
"loss": f"{previous_mean_loss:.7f}",
"learn_rate": scheduler.learn_rate
})
if images_dir is not None and steps_done % create_image_every == 0:
forced_filename = f'{hypernetwork_name}-{steps_done}'
last_saved_image = os.path.join(images_dir, forced_filename)
optimizer.zero_grad()
shared.sd_model.cond_stage_model.to(devices.device)
shared.sd_model.first_stage_model.to(devices.device)
p = processing.StableDiffusionProcessingTxt2Img(
sd_model=shared.sd_model,
do_not_save_grid=True,
do_not_save_samples=True,
)
if preview_from_txt2img:
p.prompt = preview_prompt
p.negative_prompt = preview_negative_prompt
p.steps = preview_steps
p.sampler_index = preview_sampler_index
p.cfg_scale = preview_cfg_scale
p.seed = preview_seed
p.width = preview_width
p.height = preview_height
else:
p.prompt = entries[0].cond_text
p.steps = 20
preview_text = p.prompt
processed = processing.process_images(p)
image = processed.images[0] if len(processed.images)>0 else None
if unload:
shared.sd_model.cond_stage_model.to(devices.cpu)
shared.sd_model.first_stage_model.to(devices.cpu)
if image is not None:
shared.state.current_image = image
last_saved_image, last_text_info = images.save_image(image, images_dir, "", p.seed, p.prompt, shared.opts.samples_format, processed.infotexts[0], p=p, forced_filename=forced_filename, save_to_dirs=False)
last_saved_image += f", prompt: {preview_text}"
shared.state.job_no = hypernetwork.step
shared.state.textinfo = f"""
<p>
Loss: {previous_mean_loss:.7f}<br/>
Step: {hypernetwork.step}<br/>
Last prompt: {html.escape(entries[0].cond_text)}<br/>
Last saved hypernetwork: {html.escape(last_saved_file)}<br/>
Last saved image: {html.escape(last_saved_image)}<br/>
</p>
"""
report_statistics(loss_dict)
checkpoint = sd_models.select_checkpoint()
hypernetwork.sd_checkpoint = checkpoint.hash
hypernetwork.sd_checkpoint_name = checkpoint.model_name
# Before saving for the last time, change name back to the base name (as opposed to the save_hypernetwork_every step-suffixed naming convention).
hypernetwork.name = hypernetwork_name
filename = os.path.join(shared.cmd_opts.hypernetwork_dir, f'{hypernetwork.name}.pt')
hypernetwork.save(filename)
return hypernetwork, filename
<|code_end|>
modules/textual_inversion/learn_schedule.py
<|code_start|>import tqdm
class LearnScheduleIterator:
def __init__(self, learn_rate, max_steps, cur_step=0):
"""
specify learn_rate as "0.001:100, 0.00001:1000, 1e-5:10000" to have lr of 0.001 until step 100, 0.00001 until 1000, 1e-5:10000 until 10000
"""
pairs = learn_rate.split(',')
self.rates = []
self.it = 0
self.maxit = 0
for i, pair in enumerate(pairs):
tmp = pair.split(':')
if len(tmp) == 2:
step = int(tmp[1])
if step > cur_step:
self.rates.append((float(tmp[0]), min(step, max_steps)))
self.maxit += 1
if step > max_steps:
return
elif step == -1:
self.rates.append((float(tmp[0]), max_steps))
self.maxit += 1
return
else:
self.rates.append((float(tmp[0]), max_steps))
self.maxit += 1
return
def __iter__(self):
return self
def __next__(self):
if self.it < self.maxit:
self.it += 1
return self.rates[self.it - 1]
else:
raise StopIteration
class LearnRateScheduler:
def __init__(self, learn_rate, max_steps, cur_step=0, verbose=True):
self.schedules = LearnScheduleIterator(learn_rate, max_steps, cur_step)
(self.learn_rate, self.end_step) = next(self.schedules)
self.verbose = verbose
if self.verbose:
print(f'Training at rate of {self.learn_rate} until step {self.end_step}')
self.finished = False
def apply(self, optimizer, step_number):
if step_number < self.end_step:
return
try:
(self.learn_rate, self.end_step) = next(self.schedules)
except Exception:
self.finished = True
return
if self.verbose:
tqdm.tqdm.write(f'Training at rate of {self.learn_rate} until step {self.end_step}')
for pg in optimizer.param_groups:
pg['lr'] = self.learn_rate
<|code_end|>
modules/textual_inversion/textual_inversion.py
<|code_start|>import os
import sys
import traceback
import torch
import tqdm
import html
import datetime
import csv
from PIL import Image, PngImagePlugin
from modules import shared, devices, sd_hijack, processing, sd_models, images
import modules.textual_inversion.dataset
from modules.textual_inversion.learn_schedule import LearnRateScheduler
from modules.textual_inversion.image_embedding import (embedding_to_b64, embedding_from_b64,
insert_image_data_embed, extract_image_data_embed,
caption_image_overlay)
class Embedding:
def __init__(self, vec, name, step=None):
self.vec = vec
self.name = name
self.step = step
self.cached_checksum = None
self.sd_checkpoint = None
self.sd_checkpoint_name = None
def save(self, filename):
embedding_data = {
"string_to_token": {"*": 265},
"string_to_param": {"*": self.vec},
"name": self.name,
"step": self.step,
"sd_checkpoint": self.sd_checkpoint,
"sd_checkpoint_name": self.sd_checkpoint_name,
}
torch.save(embedding_data, filename)
def checksum(self):
if self.cached_checksum is not None:
return self.cached_checksum
def const_hash(a):
r = 0
for v in a:
r = (r * 281 ^ int(v) * 997) & 0xFFFFFFFF
return r
self.cached_checksum = f'{const_hash(self.vec.reshape(-1) * 100) & 0xffff:04x}'
return self.cached_checksum
class EmbeddingDatabase:
def __init__(self, embeddings_dir):
self.ids_lookup = {}
self.word_embeddings = {}
self.dir_mtime = None
self.embeddings_dir = embeddings_dir
def register_embedding(self, embedding, model):
self.word_embeddings[embedding.name] = embedding
ids = model.cond_stage_model.tokenizer([embedding.name], add_special_tokens=False)['input_ids'][0]
first_id = ids[0]
if first_id not in self.ids_lookup:
self.ids_lookup[first_id] = []
self.ids_lookup[first_id] = sorted(self.ids_lookup[first_id] + [(ids, embedding)], key=lambda x: len(x[0]), reverse=True)
return embedding
def load_textual_inversion_embeddings(self):
mt = os.path.getmtime(self.embeddings_dir)
if self.dir_mtime is not None and mt <= self.dir_mtime:
return
self.dir_mtime = mt
self.ids_lookup.clear()
self.word_embeddings.clear()
def process_file(path, filename):
name = os.path.splitext(filename)[0]
data = []
if os.path.splitext(filename.upper())[-1] in ['.PNG', '.WEBP', '.JXL', '.AVIF']:
embed_image = Image.open(path)
if hasattr(embed_image, 'text') and 'sd-ti-embedding' in embed_image.text:
data = embedding_from_b64(embed_image.text['sd-ti-embedding'])
name = data.get('name', name)
else:
data = extract_image_data_embed(embed_image)
name = data.get('name', name)
else:
data = torch.load(path, map_location="cpu")
# textual inversion embeddings
if 'string_to_param' in data:
param_dict = data['string_to_param']
if hasattr(param_dict, '_parameters'):
param_dict = getattr(param_dict, '_parameters') # fix for torch 1.12.1 loading saved file from torch 1.11
assert len(param_dict) == 1, 'embedding file has multiple terms in it'
emb = next(iter(param_dict.items()))[1]
# diffuser concepts
elif type(data) == dict and type(next(iter(data.values()))) == torch.Tensor:
assert len(data.keys()) == 1, 'embedding file has multiple terms in it'
emb = next(iter(data.values()))
if len(emb.shape) == 1:
emb = emb.unsqueeze(0)
else:
raise Exception(f"Couldn't identify {filename} as neither textual inversion embedding nor diffuser concept.")
vec = emb.detach().to(devices.device, dtype=torch.float32)
embedding = Embedding(vec, name)
embedding.step = data.get('step', None)
embedding.sd_checkpoint = data.get('hash', None)
embedding.sd_checkpoint_name = data.get('sd_checkpoint_name', None)
self.register_embedding(embedding, shared.sd_model)
for fn in os.listdir(self.embeddings_dir):
try:
fullfn = os.path.join(self.embeddings_dir, fn)
if os.stat(fullfn).st_size == 0:
continue
process_file(fullfn, fn)
except Exception:
print(f"Error loading emedding {fn}:", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
continue
print(f"Loaded a total of {len(self.word_embeddings)} textual inversion embeddings.")
print("Embeddings:", ', '.join(self.word_embeddings.keys()))
def find_embedding_at_position(self, tokens, offset):
token = tokens[offset]
possible_matches = self.ids_lookup.get(token, None)
if possible_matches is None:
return None, None
for ids, embedding in possible_matches:
if tokens[offset:offset + len(ids)] == ids:
return embedding, len(ids)
return None, None
def create_embedding(name, num_vectors_per_token, overwrite_old, init_text='*'):
cond_model = shared.sd_model.cond_stage_model
embedding_layer = cond_model.wrapped.transformer.text_model.embeddings
with devices.autocast():
cond_model([""]) # will send cond model to GPU if lowvram/medvram is active
ids = cond_model.tokenizer(init_text, max_length=num_vectors_per_token, return_tensors="pt", add_special_tokens=False)["input_ids"]
embedded = embedding_layer.token_embedding.wrapped(ids.to(devices.device)).squeeze(0)
vec = torch.zeros((num_vectors_per_token, embedded.shape[1]), device=devices.device)
for i in range(num_vectors_per_token):
vec[i] = embedded[i * int(embedded.shape[0]) // num_vectors_per_token]
# Remove illegal characters from name.
name = "".join( x for x in name if (x.isalnum() or x in "._- "))
fn = os.path.join(shared.cmd_opts.embeddings_dir, f"{name}.pt")
if not overwrite_old:
assert not os.path.exists(fn), f"file {fn} already exists"
embedding = Embedding(vec, name)
embedding.step = 0
embedding.save(fn)
return fn
def write_loss(log_directory, filename, step, epoch_len, values):
if shared.opts.training_write_csv_every == 0:
return
if (step + 1) % shared.opts.training_write_csv_every != 0:
return
write_csv_header = False if os.path.exists(os.path.join(log_directory, filename)) else True
with open(os.path.join(log_directory, filename), "a+", newline='') as fout:
csv_writer = csv.DictWriter(fout, fieldnames=["step", "epoch", "epoch_step", *(values.keys())])
if write_csv_header:
csv_writer.writeheader()
epoch = step // epoch_len
epoch_step = step % epoch_len
csv_writer.writerow({
"step": step + 1,
"epoch": epoch,
"epoch_step": epoch_step + 1,
**values,
})
def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, create_image_every, save_embedding_every, template_file, save_image_with_stored_embedding, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height):
assert embedding_name, 'embedding not selected'
shared.state.textinfo = "Initializing textual inversion training..."
shared.state.job_count = steps
filename = os.path.join(shared.cmd_opts.embeddings_dir, f'{embedding_name}.pt')
log_directory = os.path.join(log_directory, datetime.datetime.now().strftime("%Y-%m-%d"), embedding_name)
if save_embedding_every > 0:
embedding_dir = os.path.join(log_directory, "embeddings")
os.makedirs(embedding_dir, exist_ok=True)
else:
embedding_dir = None
if create_image_every > 0:
images_dir = os.path.join(log_directory, "images")
os.makedirs(images_dir, exist_ok=True)
else:
images_dir = None
if create_image_every > 0 and save_image_with_stored_embedding:
images_embeds_dir = os.path.join(log_directory, "image_embeddings")
os.makedirs(images_embeds_dir, exist_ok=True)
else:
images_embeds_dir = None
cond_model = shared.sd_model.cond_stage_model
shared.state.textinfo = f"Preparing dataset from {html.escape(data_root)}..."
with torch.autocast("cuda"):
ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=embedding_name, model=shared.sd_model, device=devices.device, template_file=template_file, batch_size=batch_size)
hijack = sd_hijack.model_hijack
embedding = hijack.embedding_db.word_embeddings[embedding_name]
embedding.vec.requires_grad = True
losses = torch.zeros((32,))
last_saved_file = "<none>"
last_saved_image = "<none>"
forced_filename = "<none>"
embedding_yet_to_be_embedded = False
ititial_step = embedding.step or 0
if ititial_step > steps:
return embedding, filename
scheduler = LearnRateScheduler(learn_rate, steps, ititial_step)
optimizer = torch.optim.AdamW([embedding.vec], lr=scheduler.learn_rate)
pbar = tqdm.tqdm(enumerate(ds), total=steps-ititial_step)
for i, entries in pbar:
embedding.step = i + ititial_step
scheduler.apply(optimizer, embedding.step)
if scheduler.finished:
break
if shared.state.interrupted:
break
with torch.autocast("cuda"):
c = cond_model([entry.cond_text for entry in entries])
x = torch.stack([entry.latent for entry in entries]).to(devices.device)
loss = shared.sd_model(x, c)[0]
del x
losses[embedding.step % losses.shape[0]] = loss.item()
optimizer.zero_grad()
loss.backward()
optimizer.step()
steps_done = embedding.step + 1
epoch_num = embedding.step // len(ds)
epoch_step = embedding.step % len(ds)
pbar.set_description(f"[Epoch {epoch_num}: {epoch_step+1}/{len(ds)}]loss: {losses.mean():.7f}")
if embedding_dir is not None and steps_done % save_embedding_every == 0:
# Before saving, change name to match current checkpoint.
embedding.name = f'{embedding_name}-{steps_done}'
last_saved_file = os.path.join(embedding_dir, f'{embedding.name}.pt')
embedding.save(last_saved_file)
embedding_yet_to_be_embedded = True
write_loss(log_directory, "textual_inversion_loss.csv", embedding.step, len(ds), {
"loss": f"{losses.mean():.7f}",
"learn_rate": scheduler.learn_rate
})
if images_dir is not None and steps_done % create_image_every == 0:
forced_filename = f'{embedding_name}-{steps_done}'
last_saved_image = os.path.join(images_dir, forced_filename)
p = processing.StableDiffusionProcessingTxt2Img(
sd_model=shared.sd_model,
do_not_save_grid=True,
do_not_save_samples=True,
do_not_reload_embeddings=True,
)
if preview_from_txt2img:
p.prompt = preview_prompt
p.negative_prompt = preview_negative_prompt
p.steps = preview_steps
p.sampler_index = preview_sampler_index
p.cfg_scale = preview_cfg_scale
p.seed = preview_seed
p.width = preview_width
p.height = preview_height
else:
p.prompt = entries[0].cond_text
p.steps = 20
p.width = training_width
p.height = training_height
preview_text = p.prompt
processed = processing.process_images(p)
image = processed.images[0]
shared.state.current_image = image
if save_image_with_stored_embedding and os.path.exists(last_saved_file) and embedding_yet_to_be_embedded:
last_saved_image_chunks = os.path.join(images_embeds_dir, f'{embedding_name}-{steps_done}.png')
info = PngImagePlugin.PngInfo()
data = torch.load(last_saved_file)
info.add_text("sd-ti-embedding", embedding_to_b64(data))
title = "<{}>".format(data.get('name', '???'))
try:
vectorSize = list(data['string_to_param'].values())[0].shape[0]
except Exception as e:
vectorSize = '?'
checkpoint = sd_models.select_checkpoint()
footer_left = checkpoint.model_name
footer_mid = '[{}]'.format(checkpoint.hash)
footer_right = '{}v {}s'.format(vectorSize, steps_done)
captioned_image = caption_image_overlay(image, title, footer_left, footer_mid, footer_right)
captioned_image = insert_image_data_embed(captioned_image, data)
captioned_image.save(last_saved_image_chunks, "PNG", pnginfo=info)
embedding_yet_to_be_embedded = False
last_saved_image, last_text_info = images.save_image(image, images_dir, "", p.seed, p.prompt, shared.opts.samples_format, processed.infotexts[0], p=p, forced_filename=forced_filename, save_to_dirs=False)
last_saved_image += f", prompt: {preview_text}"
shared.state.job_no = embedding.step
shared.state.textinfo = f"""
<p>
Loss: {losses.mean():.7f}<br/>
Step: {embedding.step}<br/>
Last prompt: {html.escape(entries[0].cond_text)}<br/>
Last saved embedding: {html.escape(last_saved_file)}<br/>
Last saved image: {html.escape(last_saved_image)}<br/>
</p>
"""
checkpoint = sd_models.select_checkpoint()
embedding.sd_checkpoint = checkpoint.hash
embedding.sd_checkpoint_name = checkpoint.model_name
embedding.cached_checksum = None
# Before saving for the last time, change name back to base name (as opposed to the save_embedding_every step-suffixed naming convention).
embedding.name = embedding_name
filename = os.path.join(shared.cmd_opts.embeddings_dir, f'{embedding.name}.pt')
embedding.save(filename)
return embedding, filename
<|code_end|>
|
[Bug]: Error when generating with 'Highres. fix' and 'Upscale latent space image when doing hires. fix'
### Is there an existing issue for this?
- [X] I have searched the existing issues and checked the recent builds/commits
### What happened?
Attempting to use 'Highres. fix' together with 'Upscale latent space image when doing hires. fix' results in the error "UnboundLocalError: local variable 'decoded_samples' referenced before assignment"; it appears in the console when the second pass of steps begins.
When trying to generate "a cat" using Stable Diffusion 1.5, the console prints:
```
Error completing request4:21, 7.00it/s]
Arguments: ('a cat', '', 'None', 'None', 28, 0, False, False, 1, 1, 7, -1.0, -1.0, 0, 0, 0, False, 1024, 1024, True, 0.7, 0, 0, 0, False, False, None, '', 1, '', 0, '', True, False, False) {}
Traceback (most recent call last):
File "...\modules\ui.py", line 221, in f
res = list(func(*args, **kwargs))
File "...\webui.py", line 63, in f
res = func(*args, **kwargs)
File "...\modules\txt2img.py", line 48, in txt2img
processed = process_images(p)
File "...\modules\processing.py", line 426, in process_images
res = process_images_inner(p)
File "...\modules\processing.py", line 519, in process_images_inner
samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength)
File "...\modules\processing.py", line 688, in sample
decoded_samples,
UnboundLocalError: local variable 'decoded_samples' referenced before assignment
```
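For context, here is a minimal hypothetical sketch (not the actual webui code; the function and variable names below are illustrative) of the Python pattern that produces this class of UnboundLocalError: a local variable that is assigned in only one branch of an if/else and then referenced unconditionally afterwards.
```python
# Hypothetical illustration of the error pattern only; not the webui implementation.
def hires_pass(samples, upscale_latent):
    if upscale_latent:
        samples = [s * 2 for s in samples]          # latent path: decoded_samples is never assigned
    else:
        decoded_samples = [s + 1 for s in samples]  # only this branch defines decoded_samples
    # referenced unconditionally -> UnboundLocalError when upscale_latent is True
    return decoded_samples, samples

hires_pass([1, 2, 3], upscale_latent=True)
```
The usual remedy for this pattern is to assign the variable on every code path, or to reference it only inside the branch that defines it.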
### Steps to reproduce the problem
1. Enable 'Upscale latent space image when doing hires. fix' in Settings
2. Enable 'Highres. fix'
3. Generate
### What should have happened?
The image should have generated
### Commit where the problem happens
d885a4a57b72152745ca76192ef1bdda29e6461d
### What platforms do you use to access UI ?
Windows
### What browsers do you use to access the UI ?
Google Chrome
|
modules/masking.py
<|code_start|>from PIL import Image, ImageFilter, ImageOps
def get_crop_region(mask, pad=0):
"""finds a rectangular region that contains all masked ares in an image. Returns (x1, y1, x2, y2) coordinates of the rectangle.
For example, if a user has painted the top-right part of a 512x512 image", the result may be (256, 0, 512, 256)"""
h, w = mask.shape
crop_left = 0
for i in range(w):
if not (mask[:, i] == 0).all():
break
crop_left += 1
crop_right = 0
for i in reversed(range(w)):
if not (mask[:, i] == 0).all():
break
crop_right += 1
crop_top = 0
for i in range(h):
if not (mask[i] == 0).all():
break
crop_top += 1
crop_bottom = 0
for i in reversed(range(h)):
if not (mask[i] == 0).all():
break
crop_bottom += 1
return (
int(max(crop_left-pad, 0)),
int(max(crop_top-pad, 0)),
int(min(w - crop_right + pad, w)),
int(min(h - crop_bottom + pad, h))
)
def expand_crop_region(crop_region, processing_width, processing_height, image_width, image_height):
"""expands crop region get_crop_region() to match the ratio of the image the region will processed in; returns expanded region
for example, if user drew mask in a 128x32 region, and the dimensions for processing are 512x512, the region will be expanded to 128x128."""
x1, y1, x2, y2 = crop_region
ratio_crop_region = (x2 - x1) / (y2 - y1)
ratio_processing = processing_width / processing_height
if ratio_crop_region > ratio_processing:
desired_height = (x2 - x1) * ratio_processing
desired_height_diff = int(desired_height - (y2-y1))
y1 -= desired_height_diff//2
y2 += desired_height_diff - desired_height_diff//2
if y2 >= image_height:
diff = y2 - image_height
y2 -= diff
y1 -= diff
if y1 < 0:
y2 -= y1
y1 -= y1
if y2 >= image_height:
y2 = image_height
else:
desired_width = (y2 - y1) * ratio_processing
desired_width_diff = int(desired_width - (x2-x1))
x1 -= desired_width_diff//2
x2 += desired_width_diff - desired_width_diff//2
if x2 >= image_width:
diff = x2 - image_width
x2 -= diff
x1 -= diff
if x1 < 0:
x2 -= x1
x1 -= x1
if x2 >= image_width:
x2 = image_width
return x1, y1, x2, y2
def fill(image, mask):
"""fills masked regions with colors from image using blur. Not extremely effective."""
image_mod = Image.new('RGBA', (image.width, image.height))
image_masked = Image.new('RGBa', (image.width, image.height))
image_masked.paste(image.convert("RGBA").convert("RGBa"), mask=ImageOps.invert(mask.convert('L')))
image_masked = image_masked.convert('RGBa')
for radius, repeats in [(256, 1), (64, 1), (16, 2), (4, 4), (2, 2), (0, 1)]:
blurred = image_masked.filter(ImageFilter.GaussianBlur(radius)).convert('RGBA')
for _ in range(repeats):
image_mod.alpha_composite(blurred)
return image_mod.convert("RGB")
<|code_end|>
modules/processing.py
<|code_start|>import json
import math
import os
import sys
import torch
import numpy as np
from PIL import Image, ImageFilter, ImageOps
import random
import cv2
from skimage import exposure
from typing import Any, Dict, List, Optional
import modules.sd_hijack
from modules import devices, prompt_parser, masking, sd_samplers, lowvram, generation_parameters_copypaste
from modules.sd_hijack import model_hijack
from modules.shared import opts, cmd_opts, state
import modules.shared as shared
import modules.face_restoration
import modules.images as images
import modules.styles
import logging
# some of those options should not be changed at all because they would break the model, so I removed them from options.
opt_C = 4
opt_f = 8
def setup_color_correction(image):
logging.info("Calibrating color correction.")
correction_target = cv2.cvtColor(np.asarray(image.copy()), cv2.COLOR_RGB2LAB)
return correction_target
def apply_color_correction(correction, image):
logging.info("Applying color correction.")
image = Image.fromarray(cv2.cvtColor(exposure.match_histograms(
cv2.cvtColor(
np.asarray(image),
cv2.COLOR_RGB2LAB
),
correction,
channel_axis=2
), cv2.COLOR_LAB2RGB).astype("uint8"))
return image
def apply_overlay(image, paste_loc, index, overlays):
if overlays is None or index >= len(overlays):
return image
overlay = overlays[index]
if paste_loc is not None:
x, y, w, h = paste_loc
base_image = Image.new('RGBA', (overlay.width, overlay.height))
image = images.resize_image(1, image, w, h)
base_image.paste(image, (x, y))
image = base_image
image = image.convert('RGBA')
image.alpha_composite(overlay)
image = image.convert('RGB')
return image
def get_correct_sampler(p):
if isinstance(p, modules.processing.StableDiffusionProcessingTxt2Img):
return sd_samplers.samplers
elif isinstance(p, modules.processing.StableDiffusionProcessingImg2Img):
return sd_samplers.samplers_for_img2img
elif isinstance(p, modules.api.processing.StableDiffusionProcessingAPI):
return sd_samplers.samplers
class StableDiffusionProcessing():
"""
The first set of parameters: sd_models -> do_not_reload_embeddings represents the minimum required to create a StableDiffusionProcessing
"""
def __init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt: str = "", styles: List[str] = None, seed: int = -1, subseed: int = -1, subseed_strength: float = 0, seed_resize_from_h: int = -1, seed_resize_from_w: int = -1, seed_enable_extras: bool = True, sampler_index: int = 0, batch_size: int = 1, n_iter: int = 1, steps: int = 50, cfg_scale: float = 7.0, width: int = 512, height: int = 512, restore_faces: bool = False, tiling: bool = False, do_not_save_samples: bool = False, do_not_save_grid: bool = False, extra_generation_params: Dict[Any, Any] = None, overlay_images: Any = None, negative_prompt: str = None, eta: float = None, do_not_reload_embeddings: bool = False, denoising_strength: float = 0, ddim_discretize: str = None, s_churn: float = 0.0, s_tmax: float = None, s_tmin: float = 0.0, s_noise: float = 1.0, override_settings: Dict[str, Any] = None):
self.sd_model = sd_model
self.outpath_samples: str = outpath_samples
self.outpath_grids: str = outpath_grids
self.prompt: str = prompt
self.prompt_for_display: str = None
self.negative_prompt: str = (negative_prompt or "")
self.styles: list = styles or []
self.seed: int = seed
self.subseed: int = subseed
self.subseed_strength: float = subseed_strength
self.seed_resize_from_h: int = seed_resize_from_h
self.seed_resize_from_w: int = seed_resize_from_w
self.sampler_index: int = sampler_index
self.batch_size: int = batch_size
self.n_iter: int = n_iter
self.steps: int = steps
self.cfg_scale: float = cfg_scale
self.width: int = width
self.height: int = height
self.restore_faces: bool = restore_faces
self.tiling: bool = tiling
self.do_not_save_samples: bool = do_not_save_samples
self.do_not_save_grid: bool = do_not_save_grid
self.extra_generation_params: dict = extra_generation_params or {}
self.overlay_images = overlay_images
self.eta = eta
self.do_not_reload_embeddings = do_not_reload_embeddings
self.paste_to = None
self.color_corrections = None
self.denoising_strength: float = denoising_strength
self.sampler_noise_scheduler_override = None
self.ddim_discretize = ddim_discretize or opts.ddim_discretize
self.s_churn = s_churn or opts.s_churn
self.s_tmin = s_tmin or opts.s_tmin
self.s_tmax = s_tmax or float('inf') # not representable as a standard ui option
self.s_noise = s_noise or opts.s_noise
self.override_settings = {k: v for k, v in (override_settings or {}).items() if k not in shared.restricted_opts}
if not seed_enable_extras:
self.subseed = -1
self.subseed_strength = 0
self.seed_resize_from_h = 0
self.seed_resize_from_w = 0
self.scripts = None
self.script_args = None
self.all_prompts = None
self.all_seeds = None
self.all_subseeds = None
def txt2img_image_conditioning(self, x, width=None, height=None):
if self.sampler.conditioning_key not in {'hybrid', 'concat'}:
# Dummy zero conditioning if we're not using inpainting model.
# Still takes up a bit of memory, but no encoder call.
# Pretty sure we can just make this a 1x1 image since it's not going to be used besides its batch size.
return torch.zeros(
x.shape[0], 5, 1, 1,
dtype=x.dtype,
device=x.device
)
height = height or self.height
width = width or self.width
# The "masked-image" in this case will just be all zeros since the entire image is masked.
image_conditioning = torch.zeros(x.shape[0], 3, height, width, device=x.device)
image_conditioning = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(image_conditioning))
# Add the fake full 1s mask to the first dimension.
image_conditioning = torch.nn.functional.pad(image_conditioning, (0, 0, 0, 0, 1, 0), value=1.0)
image_conditioning = image_conditioning.to(x.dtype)
return image_conditioning
def img2img_image_conditioning(self, source_image, latent_image, image_mask = None):
if self.sampler.conditioning_key not in {'hybrid', 'concat'}:
# Dummy zero conditioning if we're not using inpainting model.
return torch.zeros(
latent_image.shape[0], 5, 1, 1,
dtype=latent_image.dtype,
device=latent_image.device
)
# Handle the different mask inputs
if image_mask is not None:
if torch.is_tensor(image_mask):
conditioning_mask = image_mask
else:
conditioning_mask = np.array(image_mask.convert("L"))
conditioning_mask = conditioning_mask.astype(np.float32) / 255.0
conditioning_mask = torch.from_numpy(conditioning_mask[None, None])
# Inpainting model uses a discretized mask as input, so we round to either 1.0 or 0.0
conditioning_mask = torch.round(conditioning_mask)
else:
conditioning_mask = torch.ones(1, 1, *source_image.shape[-2:])
# Create another latent image, this time with a masked version of the original input.
# Smoothly interpolate between the masked and unmasked latent conditioning image using a parameter.
conditioning_mask = conditioning_mask.to(source_image.device)
conditioning_image = torch.lerp(
source_image,
source_image * (1.0 - conditioning_mask),
getattr(self, "inpainting_mask_weight", shared.opts.inpainting_mask_weight)
)
# Encode the new masked image using first stage of network.
conditioning_image = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(conditioning_image))
# Create the concatenated conditioning tensor to be fed to `c_concat`
conditioning_mask = torch.nn.functional.interpolate(conditioning_mask, size=latent_image.shape[-2:])
conditioning_mask = conditioning_mask.expand(conditioning_image.shape[0], -1, -1, -1)
image_conditioning = torch.cat([conditioning_mask, conditioning_image], dim=1)
image_conditioning = image_conditioning.to(shared.device).type(self.sd_model.dtype)
return image_conditioning
def init(self, all_prompts, all_seeds, all_subseeds):
pass
def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, prompts):
raise NotImplementedError()
def close(self):
self.sd_model = None
self.sampler = None
class Processed:
def __init__(self, p: StableDiffusionProcessing, images_list, seed=-1, info="", subseed=None, all_prompts=None, all_seeds=None, all_subseeds=None, index_of_first_image=0, infotexts=None):
self.images = images_list
self.prompt = p.prompt
self.negative_prompt = p.negative_prompt
self.seed = seed
self.subseed = subseed
self.subseed_strength = p.subseed_strength
self.info = info
self.width = p.width
self.height = p.height
self.sampler_index = p.sampler_index
self.sampler = sd_samplers.samplers[p.sampler_index].name
self.cfg_scale = p.cfg_scale
self.steps = p.steps
self.batch_size = p.batch_size
self.restore_faces = p.restore_faces
self.face_restoration_model = opts.face_restoration_model if p.restore_faces else None
self.sd_model_hash = shared.sd_model.sd_model_hash
self.seed_resize_from_w = p.seed_resize_from_w
self.seed_resize_from_h = p.seed_resize_from_h
self.denoising_strength = getattr(p, 'denoising_strength', None)
self.extra_generation_params = p.extra_generation_params
self.index_of_first_image = index_of_first_image
self.styles = p.styles
self.job_timestamp = state.job_timestamp
self.clip_skip = opts.CLIP_stop_at_last_layers
self.eta = p.eta
self.ddim_discretize = p.ddim_discretize
self.s_churn = p.s_churn
self.s_tmin = p.s_tmin
self.s_tmax = p.s_tmax
self.s_noise = p.s_noise
self.sampler_noise_scheduler_override = p.sampler_noise_scheduler_override
self.prompt = self.prompt if type(self.prompt) != list else self.prompt[0]
self.negative_prompt = self.negative_prompt if type(self.negative_prompt) != list else self.negative_prompt[0]
self.seed = int(self.seed if type(self.seed) != list else self.seed[0]) if self.seed is not None else -1
self.subseed = int(self.subseed if type(self.subseed) != list else self.subseed[0]) if self.subseed is not None else -1
self.all_prompts = all_prompts or [self.prompt]
self.all_seeds = all_seeds or [self.seed]
self.all_subseeds = all_subseeds or [self.subseed]
self.infotexts = infotexts or [info]
def js(self):
obj = {
"prompt": self.prompt,
"all_prompts": self.all_prompts,
"negative_prompt": self.negative_prompt,
"seed": self.seed,
"all_seeds": self.all_seeds,
"subseed": self.subseed,
"all_subseeds": self.all_subseeds,
"subseed_strength": self.subseed_strength,
"width": self.width,
"height": self.height,
"sampler_index": self.sampler_index,
"sampler": self.sampler,
"cfg_scale": self.cfg_scale,
"steps": self.steps,
"batch_size": self.batch_size,
"restore_faces": self.restore_faces,
"face_restoration_model": self.face_restoration_model,
"sd_model_hash": self.sd_model_hash,
"seed_resize_from_w": self.seed_resize_from_w,
"seed_resize_from_h": self.seed_resize_from_h,
"denoising_strength": self.denoising_strength,
"extra_generation_params": self.extra_generation_params,
"index_of_first_image": self.index_of_first_image,
"infotexts": self.infotexts,
"styles": self.styles,
"job_timestamp": self.job_timestamp,
"clip_skip": self.clip_skip,
}
return json.dumps(obj)
def infotext(self, p: StableDiffusionProcessing, index):
return create_infotext(p, self.all_prompts, self.all_seeds, self.all_subseeds, comments=[], position_in_batch=index % self.batch_size, iteration=index // self.batch_size)
# from https://discuss.pytorch.org/t/help-regarding-slerp-function-for-generative-model-sampling/32475/3
def slerp(val, low, high):
low_norm = low/torch.norm(low, dim=1, keepdim=True)
high_norm = high/torch.norm(high, dim=1, keepdim=True)
dot = (low_norm*high_norm).sum(1)
if dot.mean() > 0.9995:
return low * val + high * (1 - val)
omega = torch.acos(dot)
so = torch.sin(omega)
res = (torch.sin((1.0-val)*omega)/so).unsqueeze(1)*low + (torch.sin(val*omega)/so).unsqueeze(1) * high
return res
def create_random_tensors(shape, seeds, subseeds=None, subseed_strength=0.0, seed_resize_from_h=0, seed_resize_from_w=0, p=None):
xs = []
# if we have multiple seeds, this means we are working with batch size>1; this then
# enables the generation of additional tensors with noise that the sampler will use during its processing.
# Using those pre-generated tensors instead of simple torch.randn allows a batch with seeds [100, 101] to
# produce the same images as with two batches [100], [101].
if p is not None and p.sampler is not None and (len(seeds) > 1 and opts.enable_batch_seeds or opts.eta_noise_seed_delta > 0):
sampler_noises = [[] for _ in range(p.sampler.number_of_needed_noises(p))]
else:
sampler_noises = None
for i, seed in enumerate(seeds):
noise_shape = shape if seed_resize_from_h <= 0 or seed_resize_from_w <= 0 else (shape[0], seed_resize_from_h//8, seed_resize_from_w//8)
subnoise = None
if subseeds is not None:
subseed = 0 if i >= len(subseeds) else subseeds[i]
subnoise = devices.randn(subseed, noise_shape)
# randn results depend on device; gpu and cpu get different results for same seed;
# the way I see it, it's better to do this on CPU, so that everyone gets same result;
# but the original script had it like this, so I do not dare change it for now because
# it will break everyone's seeds.
noise = devices.randn(seed, noise_shape)
if subnoise is not None:
noise = slerp(subseed_strength, noise, subnoise)
if noise_shape != shape:
x = devices.randn(seed, shape)
dx = (shape[2] - noise_shape[2]) // 2
dy = (shape[1] - noise_shape[1]) // 2
w = noise_shape[2] if dx >= 0 else noise_shape[2] + 2 * dx
h = noise_shape[1] if dy >= 0 else noise_shape[1] + 2 * dy
tx = 0 if dx < 0 else dx
ty = 0 if dy < 0 else dy
dx = max(-dx, 0)
dy = max(-dy, 0)
x[:, ty:ty+h, tx:tx+w] = noise[:, dy:dy+h, dx:dx+w]
noise = x
if sampler_noises is not None:
cnt = p.sampler.number_of_needed_noises(p)
if opts.eta_noise_seed_delta > 0:
torch.manual_seed(seed + opts.eta_noise_seed_delta)
for j in range(cnt):
sampler_noises[j].append(devices.randn_without_seed(tuple(noise_shape)))
xs.append(noise)
if sampler_noises is not None:
p.sampler.sampler_noises = [torch.stack(n).to(shared.device) for n in sampler_noises]
x = torch.stack(xs).to(shared.device)
return x
def decode_first_stage(model, x):
with devices.autocast(disable=x.dtype == devices.dtype_vae):
x = model.decode_first_stage(x)
return x
def get_fixed_seed(seed):
if seed is None or seed == '' or seed == -1:
return int(random.randrange(4294967294))
return seed
def fix_seed(p):
p.seed = get_fixed_seed(p.seed)
p.subseed = get_fixed_seed(p.subseed)
def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments, iteration=0, position_in_batch=0):
index = position_in_batch + iteration * p.batch_size
clip_skip = getattr(p, 'clip_skip', opts.CLIP_stop_at_last_layers)
generation_params = {
"Steps": p.steps,
"Sampler": get_correct_sampler(p)[p.sampler_index].name,
"CFG scale": p.cfg_scale,
"Seed": all_seeds[index],
"Face restoration": (opts.face_restoration_model if p.restore_faces else None),
"Size": f"{p.width}x{p.height}",
"Model hash": getattr(p, 'sd_model_hash', None if not opts.add_model_hash_to_info or not shared.sd_model.sd_model_hash else shared.sd_model.sd_model_hash),
"Model": (None if not opts.add_model_name_to_info or not shared.sd_model.sd_checkpoint_info.model_name else shared.sd_model.sd_checkpoint_info.model_name.replace(',', '').replace(':', '')),
"Hypernet": (None if shared.loaded_hypernetwork is None else shared.loaded_hypernetwork.name),
"Hypernet strength": (None if shared.loaded_hypernetwork is None or shared.opts.sd_hypernetwork_strength >= 1 else shared.opts.sd_hypernetwork_strength),
"Batch size": (None if p.batch_size < 2 else p.batch_size),
"Batch pos": (None if p.batch_size < 2 else position_in_batch),
"Variation seed": (None if p.subseed_strength == 0 else all_subseeds[index]),
"Variation seed strength": (None if p.subseed_strength == 0 else p.subseed_strength),
"Seed resize from": (None if p.seed_resize_from_w == 0 or p.seed_resize_from_h == 0 else f"{p.seed_resize_from_w}x{p.seed_resize_from_h}"),
"Denoising strength": getattr(p, 'denoising_strength', None),
"Eta": (None if p.sampler is None or p.sampler.eta == p.sampler.default_eta else p.sampler.eta),
"Clip skip": None if clip_skip <= 1 else clip_skip,
"ENSD": None if opts.eta_noise_seed_delta == 0 else opts.eta_noise_seed_delta,
}
generation_params.update(p.extra_generation_params)
generation_params_text = ", ".join([k if k == v else f'{k}: {generation_parameters_copypaste.quote(v)}' for k, v in generation_params.items() if v is not None])
negative_prompt_text = "\nNegative prompt: " + p.negative_prompt if p.negative_prompt else ""
return f"{all_prompts[index]}{negative_prompt_text}\n{generation_params_text}".strip()
def process_images(p: StableDiffusionProcessing) -> Processed:
stored_opts = {k: opts.data[k] for k in p.override_settings.keys()}
try:
for k, v in p.override_settings.items():
opts.data[k] = v # we don't call onchange for simplicity which makes changing model, hypernet impossible
res = process_images_inner(p)
finally:
for k, v in stored_opts.items():
opts.data[k] = v
return res
def process_images_inner(p: StableDiffusionProcessing) -> Processed:
"""this is the main loop that both txt2img and img2img use; it calls func_init once inside all the scopes and func_sample once per batch"""
if type(p.prompt) == list:
assert(len(p.prompt) > 0)
else:
assert p.prompt is not None
with open(os.path.join(shared.script_path, "params.txt"), "w", encoding="utf8") as file:
processed = Processed(p, [], p.seed, "")
file.write(processed.infotext(p, 0))
devices.torch_gc()
seed = get_fixed_seed(p.seed)
subseed = get_fixed_seed(p.subseed)
modules.sd_hijack.model_hijack.apply_circular(p.tiling)
modules.sd_hijack.model_hijack.clear_comments()
comments = {}
shared.prompt_styles.apply_styles(p)
if type(p.prompt) == list:
p.all_prompts = p.prompt
else:
p.all_prompts = p.batch_size * p.n_iter * [p.prompt]
if type(seed) == list:
p.all_seeds = seed
else:
p.all_seeds = [int(seed) + (x if p.subseed_strength == 0 else 0) for x in range(len(p.all_prompts))]
if type(subseed) == list:
p.all_subseeds = subseed
else:
p.all_subseeds = [int(subseed) + x for x in range(len(p.all_prompts))]
def infotext(iteration=0, position_in_batch=0):
return create_infotext(p, p.all_prompts, p.all_seeds, p.all_subseeds, comments, iteration, position_in_batch)
if os.path.exists(cmd_opts.embeddings_dir) and not p.do_not_reload_embeddings:
model_hijack.embedding_db.load_textual_inversion_embeddings()
if p.scripts is not None:
p.scripts.process(p)
infotexts = []
output_images = []
with torch.no_grad(), p.sd_model.ema_scope():
with devices.autocast():
p.init(p.all_prompts, p.all_seeds, p.all_subseeds)
if state.job_count == -1:
state.job_count = p.n_iter
for n in range(p.n_iter):
if state.skipped:
state.skipped = False
if state.interrupted:
break
prompts = p.all_prompts[n * p.batch_size:(n + 1) * p.batch_size]
seeds = p.all_seeds[n * p.batch_size:(n + 1) * p.batch_size]
subseeds = p.all_subseeds[n * p.batch_size:(n + 1) * p.batch_size]
if len(prompts) == 0:
break
with devices.autocast():
uc = prompt_parser.get_learned_conditioning(shared.sd_model, len(prompts) * [p.negative_prompt], p.steps)
c = prompt_parser.get_multicond_learned_conditioning(shared.sd_model, prompts, p.steps)
if len(model_hijack.comments) > 0:
for comment in model_hijack.comments:
comments[comment] = 1
if p.n_iter > 1:
shared.state.job = f"Batch {n+1} out of {p.n_iter}"
with devices.autocast():
samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength, prompts=prompts)
samples_ddim = samples_ddim.to(devices.dtype_vae)
x_samples_ddim = decode_first_stage(p.sd_model, samples_ddim)
x_samples_ddim = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)
del samples_ddim
if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
lowvram.send_everything_to_cpu()
devices.torch_gc()
if opts.filter_nsfw:
import modules.safety as safety
x_samples_ddim = modules.safety.censor_batch(x_samples_ddim)
for i, x_sample in enumerate(x_samples_ddim):
x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2)
x_sample = x_sample.astype(np.uint8)
if p.restore_faces:
if opts.save and not p.do_not_save_samples and opts.save_images_before_face_restoration:
images.save_image(Image.fromarray(x_sample), p.outpath_samples, "", seeds[i], prompts[i], opts.samples_format, info=infotext(n, i), p=p, suffix="-before-face-restoration")
devices.torch_gc()
x_sample = modules.face_restoration.restore_faces(x_sample)
devices.torch_gc()
image = Image.fromarray(x_sample)
if p.color_corrections is not None and i < len(p.color_corrections):
if opts.save and not p.do_not_save_samples and opts.save_images_before_color_correction:
image_without_cc = apply_overlay(image, p.paste_to, i, p.overlay_images)
images.save_image(image_without_cc, p.outpath_samples, "", seeds[i], prompts[i], opts.samples_format, info=infotext(n, i), p=p, suffix="-before-color-correction")
image = apply_color_correction(p.color_corrections[i], image)
image = apply_overlay(image, p.paste_to, i, p.overlay_images)
if opts.samples_save and not p.do_not_save_samples:
images.save_image(image, p.outpath_samples, "", seeds[i], prompts[i], opts.samples_format, info=infotext(n, i), p=p)
text = infotext(n, i)
infotexts.append(text)
if opts.enable_pnginfo:
image.info["parameters"] = text
output_images.append(image)
del x_samples_ddim
devices.torch_gc()
state.nextjob()
p.color_corrections = None
index_of_first_image = 0
unwanted_grid_because_of_img_count = len(output_images) < 2 and opts.grid_only_if_multiple
if (opts.return_grid or opts.grid_save) and not p.do_not_save_grid and not unwanted_grid_because_of_img_count:
grid = images.image_grid(output_images, p.batch_size)
if opts.return_grid:
text = infotext()
infotexts.insert(0, text)
if opts.enable_pnginfo:
grid.info["parameters"] = text
output_images.insert(0, grid)
index_of_first_image = 1
if opts.grid_save:
images.save_image(grid, p.outpath_grids, "grid", p.all_seeds[0], p.all_prompts[0], opts.grid_format, info=infotext(), short_filename=not opts.grid_extended_filename, p=p, grid=True)
devices.torch_gc()
res = Processed(p, output_images, p.all_seeds[0], infotext() + "".join(["\n\n" + x for x in comments]), subseed=p.all_subseeds[0], all_prompts=p.all_prompts, all_seeds=p.all_seeds, all_subseeds=p.all_subseeds, index_of_first_image=index_of_first_image, infotexts=infotexts)
if p.scripts is not None:
p.scripts.postprocess(p, res)
return res
class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
sampler = None
def __init__(self, enable_hr: bool=False, denoising_strength: float=0.75, firstphase_width: int=0, firstphase_height: int=0, **kwargs):
super().__init__(**kwargs)
self.enable_hr = enable_hr
self.denoising_strength = denoising_strength
self.firstphase_width = firstphase_width
self.firstphase_height = firstphase_height
self.truncate_x = 0
self.truncate_y = 0
def init(self, all_prompts, all_seeds, all_subseeds):
if self.enable_hr:
if state.job_count == -1:
state.job_count = self.n_iter * 2
else:
state.job_count = state.job_count * 2
self.extra_generation_params["First pass size"] = f"{self.firstphase_width}x{self.firstphase_height}"
if self.firstphase_width == 0 or self.firstphase_height == 0:
desired_pixel_count = 512 * 512
actual_pixel_count = self.width * self.height
scale = math.sqrt(desired_pixel_count / actual_pixel_count)
self.firstphase_width = math.ceil(scale * self.width / 64) * 64
self.firstphase_height = math.ceil(scale * self.height / 64) * 64
firstphase_width_truncated = int(scale * self.width)
firstphase_height_truncated = int(scale * self.height)
else:
width_ratio = self.width / self.firstphase_width
height_ratio = self.height / self.firstphase_height
if width_ratio > height_ratio:
firstphase_width_truncated = self.firstphase_width
firstphase_height_truncated = self.firstphase_width * self.height / self.width
else:
firstphase_width_truncated = self.firstphase_height * self.width / self.height
firstphase_height_truncated = self.firstphase_height
self.truncate_x = int(self.firstphase_width - firstphase_width_truncated) // opt_f
self.truncate_y = int(self.firstphase_height - firstphase_height_truncated) // opt_f
def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, prompts):
self.sampler = sd_samplers.create_sampler_with_index(sd_samplers.samplers, self.sampler_index, self.sd_model)
if not self.enable_hr:
x = create_random_tensors([opt_C, self.height // opt_f, self.width // opt_f], seeds=seeds, subseeds=subseeds, subseed_strength=self.subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self)
samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x))
return samples
x = create_random_tensors([opt_C, self.firstphase_height // opt_f, self.firstphase_width // opt_f], seeds=seeds, subseeds=subseeds, subseed_strength=self.subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self)
samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x, self.firstphase_width, self.firstphase_height))
samples = samples[:, :, self.truncate_y//2:samples.shape[2]-self.truncate_y//2, self.truncate_x//2:samples.shape[3]-self.truncate_x//2]
"""saves image before applying hires fix, if enabled in options; takes as an arguyment either an image or batch with latent space images"""
def save_intermediate(image, index):
if not opts.save or self.do_not_save_samples or not opts.save_images_before_highres_fix:
return
if not isinstance(image, Image.Image):
image = sd_samplers.sample_to_image(image, index)
images.save_image(image, self.outpath_samples, "", seeds[index], prompts[index], opts.samples_format, suffix="-before-highres-fix")
if opts.use_scale_latent_for_hires_fix:
samples = torch.nn.functional.interpolate(samples, size=(self.height // opt_f, self.width // opt_f), mode="bilinear")
for i in range(samples.shape[0]):
save_intermediate(samples, i)
else:
decoded_samples = decode_first_stage(self.sd_model, samples)
lowres_samples = torch.clamp((decoded_samples + 1.0) / 2.0, min=0.0, max=1.0)
batch_images = []
for i, x_sample in enumerate(lowres_samples):
x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2)
x_sample = x_sample.astype(np.uint8)
image = Image.fromarray(x_sample)
save_intermediate(image, i)
image = images.resize_image(0, image, self.width, self.height)
image = np.array(image).astype(np.float32) / 255.0
image = np.moveaxis(image, 2, 0)
batch_images.append(image)
decoded_samples = torch.from_numpy(np.array(batch_images))
decoded_samples = decoded_samples.to(shared.device)
decoded_samples = 2. * decoded_samples - 1.
samples = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(decoded_samples))
shared.state.nextjob()
self.sampler = sd_samplers.create_sampler_with_index(sd_samplers.samplers, self.sampler_index, self.sd_model)
noise = create_random_tensors(samples.shape[1:], seeds=seeds, subseeds=subseeds, subseed_strength=subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self)
image_conditioning = self.txt2img_image_conditioning(x)
# GC now before running the next img2img to prevent running out of memory
x = None
devices.torch_gc()
samples = self.sampler.sample_img2img(self, samples, noise, conditioning, unconditional_conditioning, steps=self.steps, image_conditioning=image_conditioning)
return samples
class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
sampler = None
def __init__(self, init_images: list=None, resize_mode: int=0, denoising_strength: float=0.75, mask: Any=None, mask_blur: int=4, inpainting_fill: int=0, inpaint_full_res: bool=True, inpaint_full_res_padding: int=0, inpainting_mask_invert: int=0, **kwargs):
super().__init__(**kwargs)
self.init_images = init_images
self.resize_mode: int = resize_mode
self.denoising_strength: float = denoising_strength
self.init_latent = None
self.image_mask = mask
#self.image_unblurred_mask = None
self.latent_mask = None
self.mask_for_overlay = None
self.mask_blur = mask_blur
self.inpainting_fill = inpainting_fill
self.inpaint_full_res = inpaint_full_res
self.inpaint_full_res_padding = inpaint_full_res_padding
self.inpainting_mask_invert = inpainting_mask_invert
self.mask = None
self.nmask = None
self.image_conditioning = None
def init(self, all_prompts, all_seeds, all_subseeds):
self.sampler = sd_samplers.create_sampler_with_index(sd_samplers.samplers_for_img2img, self.sampler_index, self.sd_model)
crop_region = None
if self.image_mask is not None:
self.image_mask = self.image_mask.convert('L')
if self.inpainting_mask_invert:
self.image_mask = ImageOps.invert(self.image_mask)
#self.image_unblurred_mask = self.image_mask
if self.mask_blur > 0:
self.image_mask = self.image_mask.filter(ImageFilter.GaussianBlur(self.mask_blur))
if self.inpaint_full_res:
self.mask_for_overlay = self.image_mask
mask = self.image_mask.convert('L')
crop_region = masking.get_crop_region(np.array(mask), self.inpaint_full_res_padding)
crop_region = masking.expand_crop_region(crop_region, self.width, self.height, mask.width, mask.height)
x1, y1, x2, y2 = crop_region
mask = mask.crop(crop_region)
self.image_mask = images.resize_image(2, mask, self.width, self.height)
self.paste_to = (x1, y1, x2-x1, y2-y1)
else:
self.image_mask = images.resize_image(self.resize_mode, self.image_mask, self.width, self.height)
np_mask = np.array(self.image_mask)
np_mask = np.clip((np_mask.astype(np.float32)) * 2, 0, 255).astype(np.uint8)
self.mask_for_overlay = Image.fromarray(np_mask)
self.overlay_images = []
latent_mask = self.latent_mask if self.latent_mask is not None else self.image_mask
add_color_corrections = opts.img2img_color_correction and self.color_corrections is None
if add_color_corrections:
self.color_corrections = []
imgs = []
for img in self.init_images:
image = img.convert("RGB")
if crop_region is None:
image = images.resize_image(self.resize_mode, image, self.width, self.height)
if self.image_mask is not None:
image_masked = Image.new('RGBa', (image.width, image.height))
image_masked.paste(image.convert("RGBA").convert("RGBa"), mask=ImageOps.invert(self.mask_for_overlay.convert('L')))
self.overlay_images.append(image_masked.convert('RGBA'))
if crop_region is not None:
image = image.crop(crop_region)
image = images.resize_image(2, image, self.width, self.height)
if self.image_mask is not None:
if self.inpainting_fill != 1:
image = masking.fill(image, latent_mask)
if add_color_corrections:
self.color_corrections.append(setup_color_correction(image))
image = np.array(image).astype(np.float32) / 255.0
image = np.moveaxis(image, 2, 0)
imgs.append(image)
if len(imgs) == 1:
batch_images = np.expand_dims(imgs[0], axis=0).repeat(self.batch_size, axis=0)
if self.overlay_images is not None:
self.overlay_images = self.overlay_images * self.batch_size
if self.color_corrections is not None and len(self.color_corrections) == 1:
self.color_corrections = self.color_corrections * self.batch_size
elif len(imgs) <= self.batch_size:
self.batch_size = len(imgs)
batch_images = np.array(imgs)
else:
raise RuntimeError(f"bad number of images passed: {len(imgs)}; expecting {self.batch_size} or less")
image = torch.from_numpy(batch_images)
image = 2. * image - 1.
image = image.to(shared.device)
self.init_latent = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(image))
if self.image_mask is not None:
init_mask = latent_mask
latmask = init_mask.convert('RGB').resize((self.init_latent.shape[3], self.init_latent.shape[2]))
latmask = np.moveaxis(np.array(latmask, dtype=np.float32), 2, 0) / 255
latmask = latmask[0]
latmask = np.around(latmask)
latmask = np.tile(latmask[None], (4, 1, 1))
self.mask = torch.asarray(1.0 - latmask).to(shared.device).type(self.sd_model.dtype)
self.nmask = torch.asarray(latmask).to(shared.device).type(self.sd_model.dtype)
# this needs to be fixed to be done in sample() using actual seeds for batches
if self.inpainting_fill == 2:
self.init_latent = self.init_latent * self.mask + create_random_tensors(self.init_latent.shape[1:], all_seeds[0:self.init_latent.shape[0]]) * self.nmask
elif self.inpainting_fill == 3:
self.init_latent = self.init_latent * self.mask
self.image_conditioning = self.img2img_image_conditioning(image, self.init_latent, self.image_mask)
def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, prompts):
x = create_random_tensors([opt_C, self.height // opt_f, self.width // opt_f], seeds=seeds, subseeds=subseeds, subseed_strength=self.subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self)
samples = self.sampler.sample_img2img(self, self.init_latent, x, conditioning, unconditional_conditioning, image_conditioning=self.image_conditioning)
if self.mask is not None:
samples = samples * self.nmask + self.init_latent * self.mask
del x
devices.torch_gc()
return samples
<|code_end|>
|
modules/masking.py
<|code_start|>from PIL import Image, ImageFilter, ImageOps
def get_crop_region(mask, pad=0):
"""finds a rectangular region that contains all masked ares in an image. Returns (x1, y1, x2, y2) coordinates of the rectangle.
For example, if a user has painted the top-right part of a 512x512 image", the result may be (256, 0, 512, 256)"""
h, w = mask.shape
crop_left = 0
for i in range(w):
if not (mask[:, i] == 0).all():
break
crop_left += 1
crop_right = 0
for i in reversed(range(w)):
if not (mask[:, i] == 0).all():
break
crop_right += 1
crop_top = 0
for i in range(h):
if not (mask[i] == 0).all():
break
crop_top += 1
crop_bottom = 0
for i in reversed(range(h)):
if not (mask[i] == 0).all():
break
crop_bottom += 1
return (
int(max(crop_left-pad, 0)),
int(max(crop_top-pad, 0)),
int(min(w - crop_right + pad, w)),
int(min(h - crop_bottom + pad, h))
)
def expand_crop_region(crop_region, processing_width, processing_height, image_width, image_height):
"""expands crop region get_crop_region() to match the ratio of the image the region will processed in; returns expanded region
for example, if user drew mask in a 128x32 region, and the dimensions for processing are 512x512, the region will be expanded to 128x128."""
x1, y1, x2, y2 = crop_region
ratio_crop_region = (x2 - x1) / (y2 - y1)
ratio_processing = processing_width / processing_height
if ratio_crop_region > ratio_processing:
desired_height = (x2 - x1) / ratio_processing
desired_height_diff = int(desired_height - (y2-y1))
y1 -= desired_height_diff//2
y2 += desired_height_diff - desired_height_diff//2
if y2 >= image_height:
diff = y2 - image_height
y2 -= diff
y1 -= diff
if y1 < 0:
y2 -= y1
y1 -= y1
if y2 >= image_height:
y2 = image_height
else:
desired_width = (y2 - y1) * ratio_processing
desired_width_diff = int(desired_width - (x2-x1))
x1 -= desired_width_diff//2
x2 += desired_width_diff - desired_width_diff//2
if x2 >= image_width:
diff = x2 - image_width
x2 -= diff
x1 -= diff
if x1 < 0:
x2 -= x1
x1 -= x1
if x2 >= image_width:
x2 = image_width
return x1, y1, x2, y2
def fill(image, mask):
"""fills masked regions with colors from image using blur. Not extremely effective."""
image_mod = Image.new('RGBA', (image.width, image.height))
image_masked = Image.new('RGBa', (image.width, image.height))
image_masked.paste(image.convert("RGBA").convert("RGBa"), mask=ImageOps.invert(mask.convert('L')))
image_masked = image_masked.convert('RGBa')
for radius, repeats in [(256, 1), (64, 1), (16, 2), (4, 4), (2, 2), (0, 1)]:
blurred = image_masked.filter(ImageFilter.GaussianBlur(radius)).convert('RGBA')
for _ in range(repeats):
image_mod.alpha_composite(blurred)
return image_mod.convert("RGB")
<|code_end|>
modules/processing.py
<|code_start|>import json
import math
import os
import sys
import torch
import numpy as np
from PIL import Image, ImageFilter, ImageOps
import random
import cv2
from skimage import exposure
from typing import Any, Dict, List, Optional
import modules.sd_hijack
from modules import devices, prompt_parser, masking, sd_samplers, lowvram, generation_parameters_copypaste
from modules.sd_hijack import model_hijack
from modules.shared import opts, cmd_opts, state
import modules.shared as shared
import modules.face_restoration
import modules.images as images
import modules.styles
import logging
# some of those options should not be changed at all because they would break the model, so I removed them from options.
opt_C = 4
opt_f = 8
def setup_color_correction(image):
logging.info("Calibrating color correction.")
correction_target = cv2.cvtColor(np.asarray(image.copy()), cv2.COLOR_RGB2LAB)
return correction_target
def apply_color_correction(correction, image):
logging.info("Applying color correction.")
image = Image.fromarray(cv2.cvtColor(exposure.match_histograms(
cv2.cvtColor(
np.asarray(image),
cv2.COLOR_RGB2LAB
),
correction,
channel_axis=2
), cv2.COLOR_LAB2RGB).astype("uint8"))
return image
def apply_overlay(image, paste_loc, index, overlays):
if overlays is None or index >= len(overlays):
return image
overlay = overlays[index]
if paste_loc is not None:
x, y, w, h = paste_loc
base_image = Image.new('RGBA', (overlay.width, overlay.height))
image = images.resize_image(1, image, w, h)
base_image.paste(image, (x, y))
image = base_image
image = image.convert('RGBA')
image.alpha_composite(overlay)
image = image.convert('RGB')
return image
def get_correct_sampler(p):
if isinstance(p, modules.processing.StableDiffusionProcessingTxt2Img):
return sd_samplers.samplers
elif isinstance(p, modules.processing.StableDiffusionProcessingImg2Img):
return sd_samplers.samplers_for_img2img
elif isinstance(p, modules.api.processing.StableDiffusionProcessingAPI):
return sd_samplers.samplers
class StableDiffusionProcessing():
"""
The first set of parameters: sd_models -> do_not_reload_embeddings represents the minimum required to create a StableDiffusionProcessing
"""
def __init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt: str = "", styles: List[str] = None, seed: int = -1, subseed: int = -1, subseed_strength: float = 0, seed_resize_from_h: int = -1, seed_resize_from_w: int = -1, seed_enable_extras: bool = True, sampler_index: int = 0, batch_size: int = 1, n_iter: int = 1, steps: int = 50, cfg_scale: float = 7.0, width: int = 512, height: int = 512, restore_faces: bool = False, tiling: bool = False, do_not_save_samples: bool = False, do_not_save_grid: bool = False, extra_generation_params: Dict[Any, Any] = None, overlay_images: Any = None, negative_prompt: str = None, eta: float = None, do_not_reload_embeddings: bool = False, denoising_strength: float = 0, ddim_discretize: str = None, s_churn: float = 0.0, s_tmax: float = None, s_tmin: float = 0.0, s_noise: float = 1.0, override_settings: Dict[str, Any] = None):
self.sd_model = sd_model
self.outpath_samples: str = outpath_samples
self.outpath_grids: str = outpath_grids
self.prompt: str = prompt
self.prompt_for_display: str = None
self.negative_prompt: str = (negative_prompt or "")
self.styles: list = styles or []
self.seed: int = seed
self.subseed: int = subseed
self.subseed_strength: float = subseed_strength
self.seed_resize_from_h: int = seed_resize_from_h
self.seed_resize_from_w: int = seed_resize_from_w
self.sampler_index: int = sampler_index
self.batch_size: int = batch_size
self.n_iter: int = n_iter
self.steps: int = steps
self.cfg_scale: float = cfg_scale
self.width: int = width
self.height: int = height
self.restore_faces: bool = restore_faces
self.tiling: bool = tiling
self.do_not_save_samples: bool = do_not_save_samples
self.do_not_save_grid: bool = do_not_save_grid
self.extra_generation_params: dict = extra_generation_params or {}
self.overlay_images = overlay_images
self.eta = eta
self.do_not_reload_embeddings = do_not_reload_embeddings
self.paste_to = None
self.color_corrections = None
self.denoising_strength: float = denoising_strength
self.sampler_noise_scheduler_override = None
self.ddim_discretize = ddim_discretize or opts.ddim_discretize
self.s_churn = s_churn or opts.s_churn
self.s_tmin = s_tmin or opts.s_tmin
self.s_tmax = s_tmax or float('inf') # not representable as a standard ui option
self.s_noise = s_noise or opts.s_noise
self.override_settings = {k: v for k, v in (override_settings or {}).items() if k not in shared.restricted_opts}
if not seed_enable_extras:
self.subseed = -1
self.subseed_strength = 0
self.seed_resize_from_h = 0
self.seed_resize_from_w = 0
self.scripts = None
self.script_args = None
self.all_prompts = None
self.all_seeds = None
self.all_subseeds = None
def txt2img_image_conditioning(self, x, width=None, height=None):
if self.sampler.conditioning_key not in {'hybrid', 'concat'}:
# Dummy zero conditioning if we're not using inpainting model.
# Still takes up a bit of memory, but no encoder call.
# Pretty sure we can just make this a 1x1 image since it's not going to be used besides its batch size.
return x.new_zeros(x.shape[0], 5, 1, 1)
height = height or self.height
width = width or self.width
# The "masked-image" in this case will just be all zeros since the entire image is masked.
image_conditioning = torch.zeros(x.shape[0], 3, height, width, device=x.device)
image_conditioning = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(image_conditioning))
# Add the fake full 1s mask to the first dimension.
image_conditioning = torch.nn.functional.pad(image_conditioning, (0, 0, 0, 0, 1, 0), value=1.0)
image_conditioning = image_conditioning.to(x.dtype)
return image_conditioning
def img2img_image_conditioning(self, source_image, latent_image, image_mask = None):
if self.sampler.conditioning_key not in {'hybrid', 'concat'}:
# Dummy zero conditioning if we're not using inpainting model.
return latent_image.new_zeros(latent_image.shape[0], 5, 1, 1)
# Handle the different mask inputs
if image_mask is not None:
if torch.is_tensor(image_mask):
conditioning_mask = image_mask
else:
conditioning_mask = np.array(image_mask.convert("L"))
conditioning_mask = conditioning_mask.astype(np.float32) / 255.0
conditioning_mask = torch.from_numpy(conditioning_mask[None, None])
# Inpainting model uses a discretized mask as input, so we round to either 1.0 or 0.0
conditioning_mask = torch.round(conditioning_mask)
else:
conditioning_mask = source_image.new_ones(1, 1, *source_image.shape[-2:])
# Create another latent image, this time with a masked version of the original input.
# Smoothly interpolate between the masked and unmasked latent conditioning image using a parameter.
conditioning_mask = conditioning_mask.to(source_image.device).to(source_image.dtype)
conditioning_image = torch.lerp(
source_image,
source_image * (1.0 - conditioning_mask),
getattr(self, "inpainting_mask_weight", shared.opts.inpainting_mask_weight)
)
# Encode the new masked image using first stage of network.
conditioning_image = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(conditioning_image))
# Create the concatenated conditioning tensor to be fed to `c_concat`
conditioning_mask = torch.nn.functional.interpolate(conditioning_mask, size=latent_image.shape[-2:])
conditioning_mask = conditioning_mask.expand(conditioning_image.shape[0], -1, -1, -1)
image_conditioning = torch.cat([conditioning_mask, conditioning_image], dim=1)
image_conditioning = image_conditioning.to(shared.device).type(self.sd_model.dtype)
return image_conditioning
def init(self, all_prompts, all_seeds, all_subseeds):
pass
def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, prompts):
raise NotImplementedError()
def close(self):
self.sd_model = None
self.sampler = None
class Processed:
def __init__(self, p: StableDiffusionProcessing, images_list, seed=-1, info="", subseed=None, all_prompts=None, all_seeds=None, all_subseeds=None, index_of_first_image=0, infotexts=None):
self.images = images_list
self.prompt = p.prompt
self.negative_prompt = p.negative_prompt
self.seed = seed
self.subseed = subseed
self.subseed_strength = p.subseed_strength
self.info = info
self.width = p.width
self.height = p.height
self.sampler_index = p.sampler_index
self.sampler = sd_samplers.samplers[p.sampler_index].name
self.cfg_scale = p.cfg_scale
self.steps = p.steps
self.batch_size = p.batch_size
self.restore_faces = p.restore_faces
self.face_restoration_model = opts.face_restoration_model if p.restore_faces else None
self.sd_model_hash = shared.sd_model.sd_model_hash
self.seed_resize_from_w = p.seed_resize_from_w
self.seed_resize_from_h = p.seed_resize_from_h
self.denoising_strength = getattr(p, 'denoising_strength', None)
self.extra_generation_params = p.extra_generation_params
self.index_of_first_image = index_of_first_image
self.styles = p.styles
self.job_timestamp = state.job_timestamp
self.clip_skip = opts.CLIP_stop_at_last_layers
self.eta = p.eta
self.ddim_discretize = p.ddim_discretize
self.s_churn = p.s_churn
self.s_tmin = p.s_tmin
self.s_tmax = p.s_tmax
self.s_noise = p.s_noise
self.sampler_noise_scheduler_override = p.sampler_noise_scheduler_override
self.prompt = self.prompt if type(self.prompt) != list else self.prompt[0]
self.negative_prompt = self.negative_prompt if type(self.negative_prompt) != list else self.negative_prompt[0]
self.seed = int(self.seed if type(self.seed) != list else self.seed[0]) if self.seed is not None else -1
self.subseed = int(self.subseed if type(self.subseed) != list else self.subseed[0]) if self.subseed is not None else -1
self.all_prompts = all_prompts or [self.prompt]
self.all_seeds = all_seeds or [self.seed]
self.all_subseeds = all_subseeds or [self.subseed]
self.infotexts = infotexts or [info]
def js(self):
obj = {
"prompt": self.prompt,
"all_prompts": self.all_prompts,
"negative_prompt": self.negative_prompt,
"seed": self.seed,
"all_seeds": self.all_seeds,
"subseed": self.subseed,
"all_subseeds": self.all_subseeds,
"subseed_strength": self.subseed_strength,
"width": self.width,
"height": self.height,
"sampler_index": self.sampler_index,
"sampler": self.sampler,
"cfg_scale": self.cfg_scale,
"steps": self.steps,
"batch_size": self.batch_size,
"restore_faces": self.restore_faces,
"face_restoration_model": self.face_restoration_model,
"sd_model_hash": self.sd_model_hash,
"seed_resize_from_w": self.seed_resize_from_w,
"seed_resize_from_h": self.seed_resize_from_h,
"denoising_strength": self.denoising_strength,
"extra_generation_params": self.extra_generation_params,
"index_of_first_image": self.index_of_first_image,
"infotexts": self.infotexts,
"styles": self.styles,
"job_timestamp": self.job_timestamp,
"clip_skip": self.clip_skip,
}
return json.dumps(obj)
def infotext(self, p: StableDiffusionProcessing, index):
return create_infotext(p, self.all_prompts, self.all_seeds, self.all_subseeds, comments=[], position_in_batch=index % self.batch_size, iteration=index // self.batch_size)
# from https://discuss.pytorch.org/t/help-regarding-slerp-function-for-generative-model-sampling/32475/3
def slerp(val, low, high):
low_norm = low/torch.norm(low, dim=1, keepdim=True)
high_norm = high/torch.norm(high, dim=1, keepdim=True)
dot = (low_norm*high_norm).sum(1)
if dot.mean() > 0.9995:
return low * val + high * (1 - val)
omega = torch.acos(dot)
so = torch.sin(omega)
res = (torch.sin((1.0-val)*omega)/so).unsqueeze(1)*low + (torch.sin(val*omega)/so).unsqueeze(1) * high
return res
def create_random_tensors(shape, seeds, subseeds=None, subseed_strength=0.0, seed_resize_from_h=0, seed_resize_from_w=0, p=None):
xs = []
# if we have multiple seeds, this means we are working with batch size>1; this then
# enables the generation of additional tensors with noise that the sampler will use during its processing.
# Using those pre-generated tensors instead of simple torch.randn allows a batch with seeds [100, 101] to
# produce the same images as with two batches [100], [101].
if p is not None and p.sampler is not None and (len(seeds) > 1 and opts.enable_batch_seeds or opts.eta_noise_seed_delta > 0):
sampler_noises = [[] for _ in range(p.sampler.number_of_needed_noises(p))]
else:
sampler_noises = None
for i, seed in enumerate(seeds):
noise_shape = shape if seed_resize_from_h <= 0 or seed_resize_from_w <= 0 else (shape[0], seed_resize_from_h//8, seed_resize_from_w//8)
subnoise = None
if subseeds is not None:
subseed = 0 if i >= len(subseeds) else subseeds[i]
subnoise = devices.randn(subseed, noise_shape)
# randn results depend on device; gpu and cpu get different results for same seed;
# the way I see it, it's better to do this on CPU, so that everyone gets same result;
# but the original script had it like this, so I do not dare change it for now because
# it will break everyone's seeds.
noise = devices.randn(seed, noise_shape)
if subnoise is not None:
noise = slerp(subseed_strength, noise, subnoise)
if noise_shape != shape:
x = devices.randn(seed, shape)
dx = (shape[2] - noise_shape[2]) // 2
dy = (shape[1] - noise_shape[1]) // 2
w = noise_shape[2] if dx >= 0 else noise_shape[2] + 2 * dx
h = noise_shape[1] if dy >= 0 else noise_shape[1] + 2 * dy
tx = 0 if dx < 0 else dx
ty = 0 if dy < 0 else dy
dx = max(-dx, 0)
dy = max(-dy, 0)
x[:, ty:ty+h, tx:tx+w] = noise[:, dy:dy+h, dx:dx+w]
noise = x
if sampler_noises is not None:
cnt = p.sampler.number_of_needed_noises(p)
if opts.eta_noise_seed_delta > 0:
torch.manual_seed(seed + opts.eta_noise_seed_delta)
for j in range(cnt):
sampler_noises[j].append(devices.randn_without_seed(tuple(noise_shape)))
xs.append(noise)
if sampler_noises is not None:
p.sampler.sampler_noises = [torch.stack(n).to(shared.device) for n in sampler_noises]
x = torch.stack(xs).to(shared.device)
return x
def decode_first_stage(model, x):
with devices.autocast(disable=x.dtype == devices.dtype_vae):
x = model.decode_first_stage(x)
return x
def get_fixed_seed(seed):
if seed is None or seed == '' or seed == -1:
return int(random.randrange(4294967294))
return seed
def fix_seed(p):
p.seed = get_fixed_seed(p.seed)
p.subseed = get_fixed_seed(p.subseed)
def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments, iteration=0, position_in_batch=0):
index = position_in_batch + iteration * p.batch_size
clip_skip = getattr(p, 'clip_skip', opts.CLIP_stop_at_last_layers)
generation_params = {
"Steps": p.steps,
"Sampler": get_correct_sampler(p)[p.sampler_index].name,
"CFG scale": p.cfg_scale,
"Seed": all_seeds[index],
"Face restoration": (opts.face_restoration_model if p.restore_faces else None),
"Size": f"{p.width}x{p.height}",
"Model hash": getattr(p, 'sd_model_hash', None if not opts.add_model_hash_to_info or not shared.sd_model.sd_model_hash else shared.sd_model.sd_model_hash),
"Model": (None if not opts.add_model_name_to_info or not shared.sd_model.sd_checkpoint_info.model_name else shared.sd_model.sd_checkpoint_info.model_name.replace(',', '').replace(':', '')),
"Hypernet": (None if shared.loaded_hypernetwork is None else shared.loaded_hypernetwork.name),
"Hypernet strength": (None if shared.loaded_hypernetwork is None or shared.opts.sd_hypernetwork_strength >= 1 else shared.opts.sd_hypernetwork_strength),
"Batch size": (None if p.batch_size < 2 else p.batch_size),
"Batch pos": (None if p.batch_size < 2 else position_in_batch),
"Variation seed": (None if p.subseed_strength == 0 else all_subseeds[index]),
"Variation seed strength": (None if p.subseed_strength == 0 else p.subseed_strength),
"Seed resize from": (None if p.seed_resize_from_w == 0 or p.seed_resize_from_h == 0 else f"{p.seed_resize_from_w}x{p.seed_resize_from_h}"),
"Denoising strength": getattr(p, 'denoising_strength', None),
"Eta": (None if p.sampler is None or p.sampler.eta == p.sampler.default_eta else p.sampler.eta),
"Clip skip": None if clip_skip <= 1 else clip_skip,
"ENSD": None if opts.eta_noise_seed_delta == 0 else opts.eta_noise_seed_delta,
}
generation_params.update(p.extra_generation_params)
generation_params_text = ", ".join([k if k == v else f'{k}: {generation_parameters_copypaste.quote(v)}' for k, v in generation_params.items() if v is not None])
negative_prompt_text = "\nNegative prompt: " + p.negative_prompt if p.negative_prompt else ""
return f"{all_prompts[index]}{negative_prompt_text}\n{generation_params_text}".strip()
def process_images(p: StableDiffusionProcessing) -> Processed:
stored_opts = {k: opts.data[k] for k in p.override_settings.keys()}
try:
for k, v in p.override_settings.items():
opts.data[k] = v # we don't call onchange for simplicity which makes changing model, hypernet impossible
res = process_images_inner(p)
finally:
for k, v in stored_opts.items():
opts.data[k] = v
return res
def process_images_inner(p: StableDiffusionProcessing) -> Processed:
"""this is the main loop that both txt2img and img2img use; it calls func_init once inside all the scopes and func_sample once per batch"""
if type(p.prompt) == list:
assert(len(p.prompt) > 0)
else:
assert p.prompt is not None
with open(os.path.join(shared.script_path, "params.txt"), "w", encoding="utf8") as file:
processed = Processed(p, [], p.seed, "")
file.write(processed.infotext(p, 0))
devices.torch_gc()
seed = get_fixed_seed(p.seed)
subseed = get_fixed_seed(p.subseed)
modules.sd_hijack.model_hijack.apply_circular(p.tiling)
modules.sd_hijack.model_hijack.clear_comments()
comments = {}
shared.prompt_styles.apply_styles(p)
if type(p.prompt) == list:
p.all_prompts = p.prompt
else:
p.all_prompts = p.batch_size * p.n_iter * [p.prompt]
if type(seed) == list:
p.all_seeds = seed
else:
p.all_seeds = [int(seed) + (x if p.subseed_strength == 0 else 0) for x in range(len(p.all_prompts))]
if type(subseed) == list:
p.all_subseeds = subseed
else:
p.all_subseeds = [int(subseed) + x for x in range(len(p.all_prompts))]
def infotext(iteration=0, position_in_batch=0):
return create_infotext(p, p.all_prompts, p.all_seeds, p.all_subseeds, comments, iteration, position_in_batch)
if os.path.exists(cmd_opts.embeddings_dir) and not p.do_not_reload_embeddings:
model_hijack.embedding_db.load_textual_inversion_embeddings()
if p.scripts is not None:
p.scripts.process(p)
infotexts = []
output_images = []
with torch.no_grad(), p.sd_model.ema_scope():
with devices.autocast():
p.init(p.all_prompts, p.all_seeds, p.all_subseeds)
if state.job_count == -1:
state.job_count = p.n_iter
for n in range(p.n_iter):
if state.skipped:
state.skipped = False
if state.interrupted:
break
prompts = p.all_prompts[n * p.batch_size:(n + 1) * p.batch_size]
seeds = p.all_seeds[n * p.batch_size:(n + 1) * p.batch_size]
subseeds = p.all_subseeds[n * p.batch_size:(n + 1) * p.batch_size]
if len(prompts) == 0:
break
with devices.autocast():
uc = prompt_parser.get_learned_conditioning(shared.sd_model, len(prompts) * [p.negative_prompt], p.steps)
c = prompt_parser.get_multicond_learned_conditioning(shared.sd_model, prompts, p.steps)
if len(model_hijack.comments) > 0:
for comment in model_hijack.comments:
comments[comment] = 1
if p.n_iter > 1:
shared.state.job = f"Batch {n+1} out of {p.n_iter}"
with devices.autocast():
samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength, prompts=prompts)
samples_ddim = samples_ddim.to(devices.dtype_vae)
x_samples_ddim = decode_first_stage(p.sd_model, samples_ddim)
x_samples_ddim = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)
del samples_ddim
if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
lowvram.send_everything_to_cpu()
devices.torch_gc()
if opts.filter_nsfw:
import modules.safety as safety
x_samples_ddim = modules.safety.censor_batch(x_samples_ddim)
for i, x_sample in enumerate(x_samples_ddim):
x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2)
x_sample = x_sample.astype(np.uint8)
if p.restore_faces:
if opts.save and not p.do_not_save_samples and opts.save_images_before_face_restoration:
images.save_image(Image.fromarray(x_sample), p.outpath_samples, "", seeds[i], prompts[i], opts.samples_format, info=infotext(n, i), p=p, suffix="-before-face-restoration")
devices.torch_gc()
x_sample = modules.face_restoration.restore_faces(x_sample)
devices.torch_gc()
image = Image.fromarray(x_sample)
if p.color_corrections is not None and i < len(p.color_corrections):
if opts.save and not p.do_not_save_samples and opts.save_images_before_color_correction:
image_without_cc = apply_overlay(image, p.paste_to, i, p.overlay_images)
images.save_image(image_without_cc, p.outpath_samples, "", seeds[i], prompts[i], opts.samples_format, info=infotext(n, i), p=p, suffix="-before-color-correction")
image = apply_color_correction(p.color_corrections[i], image)
image = apply_overlay(image, p.paste_to, i, p.overlay_images)
if opts.samples_save and not p.do_not_save_samples:
images.save_image(image, p.outpath_samples, "", seeds[i], prompts[i], opts.samples_format, info=infotext(n, i), p=p)
text = infotext(n, i)
infotexts.append(text)
if opts.enable_pnginfo:
image.info["parameters"] = text
output_images.append(image)
del x_samples_ddim
devices.torch_gc()
state.nextjob()
p.color_corrections = None
index_of_first_image = 0
unwanted_grid_because_of_img_count = len(output_images) < 2 and opts.grid_only_if_multiple
if (opts.return_grid or opts.grid_save) and not p.do_not_save_grid and not unwanted_grid_because_of_img_count:
grid = images.image_grid(output_images, p.batch_size)
if opts.return_grid:
text = infotext()
infotexts.insert(0, text)
if opts.enable_pnginfo:
grid.info["parameters"] = text
output_images.insert(0, grid)
index_of_first_image = 1
if opts.grid_save:
images.save_image(grid, p.outpath_grids, "grid", p.all_seeds[0], p.all_prompts[0], opts.grid_format, info=infotext(), short_filename=not opts.grid_extended_filename, p=p, grid=True)
devices.torch_gc()
res = Processed(p, output_images, p.all_seeds[0], infotext() + "".join(["\n\n" + x for x in comments]), subseed=p.all_subseeds[0], all_prompts=p.all_prompts, all_seeds=p.all_seeds, all_subseeds=p.all_subseeds, index_of_first_image=index_of_first_image, infotexts=infotexts)
if p.scripts is not None:
p.scripts.postprocess(p, res)
return res
class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
sampler = None
def __init__(self, enable_hr: bool=False, denoising_strength: float=0.75, firstphase_width: int=0, firstphase_height: int=0, **kwargs):
super().__init__(**kwargs)
self.enable_hr = enable_hr
self.denoising_strength = denoising_strength
self.firstphase_width = firstphase_width
self.firstphase_height = firstphase_height
self.truncate_x = 0
self.truncate_y = 0
def init(self, all_prompts, all_seeds, all_subseeds):
if self.enable_hr:
if state.job_count == -1:
state.job_count = self.n_iter * 2
else:
state.job_count = state.job_count * 2
self.extra_generation_params["First pass size"] = f"{self.firstphase_width}x{self.firstphase_height}"
if self.firstphase_width == 0 or self.firstphase_height == 0:
desired_pixel_count = 512 * 512
actual_pixel_count = self.width * self.height
scale = math.sqrt(desired_pixel_count / actual_pixel_count)
self.firstphase_width = math.ceil(scale * self.width / 64) * 64
self.firstphase_height = math.ceil(scale * self.height / 64) * 64
firstphase_width_truncated = int(scale * self.width)
firstphase_height_truncated = int(scale * self.height)
else:
width_ratio = self.width / self.firstphase_width
height_ratio = self.height / self.firstphase_height
if width_ratio > height_ratio:
firstphase_width_truncated = self.firstphase_width
firstphase_height_truncated = self.firstphase_width * self.height / self.width
else:
firstphase_width_truncated = self.firstphase_height * self.width / self.height
firstphase_height_truncated = self.firstphase_height
self.truncate_x = int(self.firstphase_width - firstphase_width_truncated) // opt_f
self.truncate_y = int(self.firstphase_height - firstphase_height_truncated) // opt_f
def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, prompts):
self.sampler = sd_samplers.create_sampler_with_index(sd_samplers.samplers, self.sampler_index, self.sd_model)
if not self.enable_hr:
x = create_random_tensors([opt_C, self.height // opt_f, self.width // opt_f], seeds=seeds, subseeds=subseeds, subseed_strength=self.subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self)
samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x))
return samples
x = create_random_tensors([opt_C, self.firstphase_height // opt_f, self.firstphase_width // opt_f], seeds=seeds, subseeds=subseeds, subseed_strength=self.subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self)
samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x, self.firstphase_width, self.firstphase_height))
samples = samples[:, :, self.truncate_y//2:samples.shape[2]-self.truncate_y//2, self.truncate_x//2:samples.shape[3]-self.truncate_x//2]
"""saves image before applying hires fix, if enabled in options; takes as an arguyment either an image or batch with latent space images"""
def save_intermediate(image, index):
if not opts.save or self.do_not_save_samples or not opts.save_images_before_highres_fix:
return
if not isinstance(image, Image.Image):
image = sd_samplers.sample_to_image(image, index)
images.save_image(image, self.outpath_samples, "", seeds[index], prompts[index], opts.samples_format, suffix="-before-highres-fix")
if opts.use_scale_latent_for_hires_fix:
samples = torch.nn.functional.interpolate(samples, size=(self.height // opt_f, self.width // opt_f), mode="bilinear")
# Avoid making the inpainting conditioning unless necessary as
# this does need some extra compute to decode / encode the image again.
if getattr(self, "inpainting_mask_weight", shared.opts.inpainting_mask_weight) < 1.0:
image_conditioning = self.img2img_image_conditioning(decode_first_stage(self.sd_model, samples), samples)
else:
image_conditioning = self.txt2img_image_conditioning(samples)
for i in range(samples.shape[0]):
save_intermediate(samples, i)
else:
decoded_samples = decode_first_stage(self.sd_model, samples)
lowres_samples = torch.clamp((decoded_samples + 1.0) / 2.0, min=0.0, max=1.0)
batch_images = []
for i, x_sample in enumerate(lowres_samples):
x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2)
x_sample = x_sample.astype(np.uint8)
image = Image.fromarray(x_sample)
save_intermediate(image, i)
image = images.resize_image(0, image, self.width, self.height)
image = np.array(image).astype(np.float32) / 255.0
image = np.moveaxis(image, 2, 0)
batch_images.append(image)
decoded_samples = torch.from_numpy(np.array(batch_images))
decoded_samples = decoded_samples.to(shared.device)
decoded_samples = 2. * decoded_samples - 1.
samples = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(decoded_samples))
image_conditioning = self.img2img_image_conditioning(decoded_samples, samples)
shared.state.nextjob()
self.sampler = sd_samplers.create_sampler_with_index(sd_samplers.samplers, self.sampler_index, self.sd_model)
noise = create_random_tensors(samples.shape[1:], seeds=seeds, subseeds=subseeds, subseed_strength=subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self)
# GC now before running the next img2img to prevent running out of memory
x = None
devices.torch_gc()
samples = self.sampler.sample_img2img(self, samples, noise, conditioning, unconditional_conditioning, steps=self.steps, image_conditioning=image_conditioning)
return samples
class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
sampler = None
def __init__(self, init_images: list=None, resize_mode: int=0, denoising_strength: float=0.75, mask: Any=None, mask_blur: int=4, inpainting_fill: int=0, inpaint_full_res: bool=True, inpaint_full_res_padding: int=0, inpainting_mask_invert: int=0, **kwargs):
super().__init__(**kwargs)
self.init_images = init_images
self.resize_mode: int = resize_mode
self.denoising_strength: float = denoising_strength
self.init_latent = None
self.image_mask = mask
#self.image_unblurred_mask = None
self.latent_mask = None
self.mask_for_overlay = None
self.mask_blur = mask_blur
self.inpainting_fill = inpainting_fill
self.inpaint_full_res = inpaint_full_res
self.inpaint_full_res_padding = inpaint_full_res_padding
self.inpainting_mask_invert = inpainting_mask_invert
self.mask = None
self.nmask = None
self.image_conditioning = None
def init(self, all_prompts, all_seeds, all_subseeds):
self.sampler = sd_samplers.create_sampler_with_index(sd_samplers.samplers_for_img2img, self.sampler_index, self.sd_model)
crop_region = None
if self.image_mask is not None:
self.image_mask = self.image_mask.convert('L')
if self.inpainting_mask_invert:
self.image_mask = ImageOps.invert(self.image_mask)
#self.image_unblurred_mask = self.image_mask
if self.mask_blur > 0:
self.image_mask = self.image_mask.filter(ImageFilter.GaussianBlur(self.mask_blur))
if self.inpaint_full_res:
self.mask_for_overlay = self.image_mask
mask = self.image_mask.convert('L')
crop_region = masking.get_crop_region(np.array(mask), self.inpaint_full_res_padding)
crop_region = masking.expand_crop_region(crop_region, self.width, self.height, mask.width, mask.height)
x1, y1, x2, y2 = crop_region
mask = mask.crop(crop_region)
self.image_mask = images.resize_image(2, mask, self.width, self.height)
self.paste_to = (x1, y1, x2-x1, y2-y1)
else:
self.image_mask = images.resize_image(self.resize_mode, self.image_mask, self.width, self.height)
np_mask = np.array(self.image_mask)
np_mask = np.clip((np_mask.astype(np.float32)) * 2, 0, 255).astype(np.uint8)
self.mask_for_overlay = Image.fromarray(np_mask)
self.overlay_images = []
latent_mask = self.latent_mask if self.latent_mask is not None else self.image_mask
add_color_corrections = opts.img2img_color_correction and self.color_corrections is None
if add_color_corrections:
self.color_corrections = []
imgs = []
for img in self.init_images:
image = img.convert("RGB")
if crop_region is None:
image = images.resize_image(self.resize_mode, image, self.width, self.height)
if self.image_mask is not None:
image_masked = Image.new('RGBa', (image.width, image.height))
image_masked.paste(image.convert("RGBA").convert("RGBa"), mask=ImageOps.invert(self.mask_for_overlay.convert('L')))
self.overlay_images.append(image_masked.convert('RGBA'))
if crop_region is not None:
image = image.crop(crop_region)
image = images.resize_image(2, image, self.width, self.height)
if self.image_mask is not None:
if self.inpainting_fill != 1:
image = masking.fill(image, latent_mask)
if add_color_corrections:
self.color_corrections.append(setup_color_correction(image))
image = np.array(image).astype(np.float32) / 255.0
image = np.moveaxis(image, 2, 0)
imgs.append(image)
if len(imgs) == 1:
batch_images = np.expand_dims(imgs[0], axis=0).repeat(self.batch_size, axis=0)
if self.overlay_images is not None:
self.overlay_images = self.overlay_images * self.batch_size
if self.color_corrections is not None and len(self.color_corrections) == 1:
self.color_corrections = self.color_corrections * self.batch_size
elif len(imgs) <= self.batch_size:
self.batch_size = len(imgs)
batch_images = np.array(imgs)
else:
raise RuntimeError(f"bad number of images passed: {len(imgs)}; expecting {self.batch_size} or less")
image = torch.from_numpy(batch_images)
image = 2. * image - 1.
image = image.to(shared.device)
self.init_latent = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(image))
if self.image_mask is not None:
init_mask = latent_mask
latmask = init_mask.convert('RGB').resize((self.init_latent.shape[3], self.init_latent.shape[2]))
latmask = np.moveaxis(np.array(latmask, dtype=np.float32), 2, 0) / 255
latmask = latmask[0]
latmask = np.around(latmask)
latmask = np.tile(latmask[None], (4, 1, 1))
self.mask = torch.asarray(1.0 - latmask).to(shared.device).type(self.sd_model.dtype)
self.nmask = torch.asarray(latmask).to(shared.device).type(self.sd_model.dtype)
# this needs to be fixed to be done in sample() using actual seeds for batches
if self.inpainting_fill == 2:
self.init_latent = self.init_latent * self.mask + create_random_tensors(self.init_latent.shape[1:], all_seeds[0:self.init_latent.shape[0]]) * self.nmask
elif self.inpainting_fill == 3:
self.init_latent = self.init_latent * self.mask
self.image_conditioning = self.img2img_image_conditioning(image, self.init_latent, self.image_mask)
def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, prompts):
x = create_random_tensors([opt_C, self.height // opt_f, self.width // opt_f], seeds=seeds, subseeds=subseeds, subseed_strength=self.subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self)
samples = self.sampler.sample_img2img(self, self.init_latent, x, conditioning, unconditional_conditioning, image_conditioning=self.image_conditioning)
if self.mask is not None:
samples = samples * self.nmask + self.init_latent * self.mask
del x
devices.torch_gc()
return samples
<|code_end|>
|
[Bug]: "1" checkpoint caching is useless
### Is there an existing issue for this?
- [X] I have searched the existing issues and checked the recent builds/commits
### What happened?
"1" checkpoint caching is useless. When you set the webui to keep n caches, only n-1 is actually useful. This happens because model is cached after loading it. When you set caching to 1, what's in the cache is really just your currently loaded checkpoint. When you load another one, the other checkpoint can't possibly be in the cache, so it's loaded and inserted into cache dict. Then, the cache dict now has 2 entries, and so the oldest one will be discarded. Leaving you with 1 cache of the current model, again. So when caching is set to 1, there's practically no caching. There's always the current model taking up 1 cache "space" (it's not duplicated in memory though).
### Steps to reproduce the problem
1. Set caching to 1
2. Load a model
3. Load another model
4. Load the first model
### What should have happened?
Step 4 should load the first model from the cache, but it doesn't, because the only entry in the cache is the model from step 3.
### Commit where the problem happens
17a2076f72562b428052ee3fc8c43d19c03ecd1e
### What platforms do you use to access UI ?
Other/Cloud
### What browsers do you use to access the UI ?
Google Chrome
### Command Line Arguments
```Shell
--autolaunch --ckpt {workspace3}/novelai/final_pruned.ckpt --vae-path {workspace3}/novelai/animevae.pt --deepdanbooru --disable-safe-unpickle --no-half-vae --xformers
```
### Additional information, context and logs
Caching should keep previously loaded models, not the currently loaded one.
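One way to get useful behaviour even with a cache size of 1 is to stash the checkpoint that is about to be replaced, instead of the one that was just loaded. The sketch below (simplified, with a hypothetical stand-in model class) illustrates the idea, which is roughly what the modified `load_model_weights` further down in this record does:
```python
import collections

CACHE_LIMIT = 1
checkpoints_loaded = collections.OrderedDict()

class FakeModel:
    """Stand-in for the real model object; only what this sketch needs."""
    def __init__(self):
        self.sd_checkpoint_info = None
        self.weights = None
    def state_dict(self):
        return {"weights": self.weights}
    def load_state_dict(self, sd):
        self.weights = sd["weights"]

def load_model_weights(model, checkpoint_info):
    # stash the *outgoing* checkpoint before it gets overwritten
    if CACHE_LIMIT > 0 and model.sd_checkpoint_info is not None:
        checkpoints_loaded[model.sd_checkpoint_info] = model.state_dict().copy()
    if checkpoint_info in checkpoints_loaded:
        print(f"cache hit for {checkpoint_info}")
        model.load_state_dict(checkpoints_loaded[checkpoint_info])
    else:
        print(f"loading {checkpoint_info} from disk")
        model.weights = f"<weights of {checkpoint_info}>"
    while len(checkpoints_loaded) > CACHE_LIMIT:
        checkpoints_loaded.popitem(last=False)
    model.sd_checkpoint_info = checkpoint_info

m = FakeModel()
load_model_weights(m, "A")  # loads from disk
load_model_weights(m, "B")  # loads from disk, "A" is now cached
load_model_weights(m, "A")  # cache hit, even with a cache size of 1
```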
<details>
<summary>my log</summary>
```
To create a public link, set `share=True` in `launch()`.
(CheckpointInfo(filename='/content/nai/stable-diffusion-webui/models/Stable-diffusion/novelai/final_pruned.ckpt', title='novelai/final_pruned.ckpt [925997e9]', hash='925997e9', model_name='novelai_final_pruned', config='/content/nai/stable-diffusion-webui/repositories/stable-diffusion/configs/stable-diffusion/v1-inference.yaml'), '/content/nai/stable-diffusion-webui/models/Stable-diffusion/novelai/animevae.pt')
{(CheckpointInfo(filename='/content/nai/stable-diffusion-webui/models/Stable-diffusion/novelai/final_pruned.ckpt', title='novelai/final_pruned.ckpt [925997e9]', hash='925997e9', model_name='novelai_final_pruned', config='/content/nai/stable-diffusion-webui/repositories/stable-diffusion/configs/stable-diffusion/v1-inference.yaml'), '/content/nai/stable-diffusion-webui/models/Stable-diffusion/novelai/animevae.pt')}
Loading weights [925997e9] with animevae VAE from cache
Applying xformers cross attention optimization.
Weights loaded.
(CheckpointInfo(filename='/content/nai/stable-diffusion-webui/models/Stable-diffusion/novelai/final_pruned.ckpt', title='novelai/final_pruned.ckpt [925997e9]', hash='925997e9', model_name='novelai_final_pruned', config='/content/nai/stable-diffusion-webui/repositories/stable-diffusion/configs/stable-diffusion/v1-inference.yaml'), None)
{(CheckpointInfo(filename='/content/nai/stable-diffusion-webui/models/Stable-diffusion/novelai/final_pruned.ckpt', title='novelai/final_pruned.ckpt [925997e9]', hash='925997e9', model_name='novelai_final_pruned', config='/content/nai/stable-diffusion-webui/repositories/stable-diffusion/configs/stable-diffusion/v1-inference.yaml'), '/content/nai/stable-diffusion-webui/models/Stable-diffusion/novelai/animevae.pt')}
Loading weights [925997e9] from /content/nai/stable-diffusion-webui/models/Stable-diffusion/novelai/final_pruned.ckpt
{(CheckpointInfo(filename='/content/nai/stable-diffusion-webui/models/Stable-diffusion/novelai/final_pruned.ckpt', title='novelai/final_pruned.ckpt [925997e9]', hash='925997e9', model_name='novelai_final_pruned', config='/content/nai/stable-diffusion-webui/repositories/stable-diffusion/configs/stable-diffusion/v1-inference.yaml'), None)}
Applying xformers cross attention optimization.
Weights loaded.
(CheckpointInfo(filename='/content/nai/stable-diffusion-webui/models/Stable-diffusion/novelai/final_pruned.ckpt', title='novelai/final_pruned.ckpt [925997e9]', hash='925997e9', model_name='novelai_final_pruned', config='/content/nai/stable-diffusion-webui/repositories/stable-diffusion/configs/stable-diffusion/v1-inference.yaml'), '/content/nai/stable-diffusion-webui/models/Stable-diffusion/novelai/animevae.pt')
{(CheckpointInfo(filename='/content/nai/stable-diffusion-webui/models/Stable-diffusion/novelai/final_pruned.ckpt', title='novelai/final_pruned.ckpt [925997e9]', hash='925997e9', model_name='novelai_final_pruned', config='/content/nai/stable-diffusion-webui/repositories/stable-diffusion/configs/stable-diffusion/v1-inference.yaml'), None)}
Loading weights [925997e9] from /content/nai/stable-diffusion-webui/models/Stable-diffusion/novelai/final_pruned.ckpt
Loading VAE weights from: /content/nai/stable-diffusion-webui/models/Stable-diffusion/novelai/animevae.pt
{(CheckpointInfo(filename='/content/nai/stable-diffusion-webui/models/Stable-diffusion/novelai/final_pruned.ckpt', title='novelai/final_pruned.ckpt [925997e9]', hash='925997e9', model_name='novelai_final_pruned', config='/content/nai/stable-diffusion-webui/repositories/stable-diffusion/configs/stable-diffusion/v1-inference.yaml'), '/content/nai/stable-diffusion-webui/models/Stable-diffusion/novelai/animevae.pt')}
Applying xformers cross attention optimization.
Weights loaded.
(CheckpointInfo(filename='/content/nai/stable-diffusion-webui/models/Stable-diffusion/novelai/final_pruned.ckpt', title='novelai/final_pruned.ckpt [925997e9]', hash='925997e9', model_name='novelai_final_pruned', config='/content/nai/stable-diffusion-webui/repositories/stable-diffusion/configs/stable-diffusion/v1-inference.yaml'), None)
{(CheckpointInfo(filename='/content/nai/stable-diffusion-webui/models/Stable-diffusion/novelai/final_pruned.ckpt', title='novelai/final_pruned.ckpt [925997e9]', hash='925997e9', model_name='novelai_final_pruned', config='/content/nai/stable-diffusion-webui/repositories/stable-diffusion/configs/stable-diffusion/v1-inference.yaml'), '/content/nai/stable-diffusion-webui/models/Stable-diffusion/novelai/animevae.pt')}
Loading weights [925997e9] from /content/nai/stable-diffusion-webui/models/Stable-diffusion/novelai/final_pruned.ckpt
2022-10-31T08:19:07Z INF Initiating graceful shutdown due to signal interrupt ...
```
</details>
|
modules/sd_models.py
<|code_start|>import collections
import os.path
import sys
import gc
from collections import namedtuple
import torch
import re
from omegaconf import OmegaConf
from ldm.util import instantiate_from_config
from modules import shared, modelloader, devices, script_callbacks, sd_vae
from modules.paths import models_path
from modules.sd_hijack_inpainting import do_inpainting_hijack, should_hijack_inpainting
model_dir = "Stable-diffusion"
model_path = os.path.abspath(os.path.join(models_path, model_dir))
CheckpointInfo = namedtuple("CheckpointInfo", ['filename', 'title', 'hash', 'model_name', 'config'])
checkpoints_list = {}
checkpoints_loaded = collections.OrderedDict()
try:
# this silences the annoying "Some weights of the model checkpoint were not used when initializing..." message at start.
from transformers import logging, CLIPModel
logging.set_verbosity_error()
except Exception:
pass
def setup_model():
if not os.path.exists(model_path):
os.makedirs(model_path)
list_models()
def checkpoint_tiles():
convert = lambda name: int(name) if name.isdigit() else name.lower()
alphanumeric_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]
return sorted([x.title for x in checkpoints_list.values()], key = alphanumeric_key)
def list_models():
checkpoints_list.clear()
model_list = modelloader.load_models(model_path=model_path, command_path=shared.cmd_opts.ckpt_dir, ext_filter=[".ckpt"])
def modeltitle(path, shorthash):
abspath = os.path.abspath(path)
if shared.cmd_opts.ckpt_dir is not None and abspath.startswith(shared.cmd_opts.ckpt_dir):
name = abspath.replace(shared.cmd_opts.ckpt_dir, '')
elif abspath.startswith(model_path):
name = abspath.replace(model_path, '')
else:
name = os.path.basename(path)
if name.startswith("\\") or name.startswith("/"):
name = name[1:]
shortname = os.path.splitext(name.replace("/", "_").replace("\\", "_"))[0]
return f'{name} [{shorthash}]', shortname
cmd_ckpt = shared.cmd_opts.ckpt
if os.path.exists(cmd_ckpt):
h = model_hash(cmd_ckpt)
title, short_model_name = modeltitle(cmd_ckpt, h)
checkpoints_list[title] = CheckpointInfo(cmd_ckpt, title, h, short_model_name, shared.cmd_opts.config)
shared.opts.data['sd_model_checkpoint'] = title
elif cmd_ckpt is not None and cmd_ckpt != shared.default_sd_model_file:
print(f"Checkpoint in --ckpt argument not found (Possible it was moved to {model_path}: {cmd_ckpt}", file=sys.stderr)
for filename in model_list:
h = model_hash(filename)
title, short_model_name = modeltitle(filename, h)
basename, _ = os.path.splitext(filename)
config = basename + ".yaml"
if not os.path.exists(config):
config = shared.cmd_opts.config
checkpoints_list[title] = CheckpointInfo(filename, title, h, short_model_name, config)
def get_closet_checkpoint_match(searchString):
applicable = sorted([info for info in checkpoints_list.values() if searchString in info.title], key = lambda x:len(x.title))
if len(applicable) > 0:
return applicable[0]
return None
def model_hash(filename):
try:
with open(filename, "rb") as file:
import hashlib
m = hashlib.sha256()
file.seek(0x100000)
m.update(file.read(0x10000))
return m.hexdigest()[0:8]
except FileNotFoundError:
return 'NOFILE'
def select_checkpoint():
model_checkpoint = shared.opts.sd_model_checkpoint
checkpoint_info = checkpoints_list.get(model_checkpoint, None)
if checkpoint_info is not None:
return checkpoint_info
if len(checkpoints_list) == 0:
print(f"No checkpoints found. When searching for checkpoints, looked at:", file=sys.stderr)
if shared.cmd_opts.ckpt is not None:
print(f" - file {os.path.abspath(shared.cmd_opts.ckpt)}", file=sys.stderr)
print(f" - directory {model_path}", file=sys.stderr)
if shared.cmd_opts.ckpt_dir is not None:
print(f" - directory {os.path.abspath(shared.cmd_opts.ckpt_dir)}", file=sys.stderr)
print(f"Can't run without a checkpoint. Find and place a .ckpt file into any of those locations. The program will exit.", file=sys.stderr)
exit(1)
checkpoint_info = next(iter(checkpoints_list.values()))
if model_checkpoint is not None:
print(f"Checkpoint {model_checkpoint} not found; loading fallback {checkpoint_info.title}", file=sys.stderr)
return checkpoint_info
chckpoint_dict_replacements = {
'cond_stage_model.transformer.embeddings.': 'cond_stage_model.transformer.text_model.embeddings.',
'cond_stage_model.transformer.encoder.': 'cond_stage_model.transformer.text_model.encoder.',
'cond_stage_model.transformer.final_layer_norm.': 'cond_stage_model.transformer.text_model.final_layer_norm.',
}
def transform_checkpoint_dict_key(k):
for text, replacement in chckpoint_dict_replacements.items():
if k.startswith(text):
k = replacement + k[len(text):]
return k
def get_state_dict_from_checkpoint(pl_sd):
if "state_dict" in pl_sd:
pl_sd = pl_sd["state_dict"]
sd = {}
for k, v in pl_sd.items():
new_key = transform_checkpoint_dict_key(k)
if new_key is not None:
sd[new_key] = v
pl_sd.clear()
pl_sd.update(sd)
return pl_sd
def load_model_weights(model, checkpoint_info, vae_file="auto"):
checkpoint_file = checkpoint_info.filename
sd_model_hash = checkpoint_info.hash
vae_file = sd_vae.resolve_vae(checkpoint_file, vae_file=vae_file)
checkpoint_key = checkpoint_info
if checkpoint_key not in checkpoints_loaded:
print(f"Loading weights [{sd_model_hash}] from {checkpoint_file}")
pl_sd = torch.load(checkpoint_file, map_location=shared.weight_load_location)
if "global_step" in pl_sd:
print(f"Global Step: {pl_sd['global_step']}")
sd = get_state_dict_from_checkpoint(pl_sd)
del pl_sd
model.load_state_dict(sd, strict=False)
del sd
if shared.cmd_opts.opt_channelslast:
model.to(memory_format=torch.channels_last)
if not shared.cmd_opts.no_half:
vae = model.first_stage_model
# with --no-half-vae, remove VAE from model when doing half() to prevent its weights from being converted to float16
if shared.cmd_opts.no_half_vae:
model.first_stage_model = None
model.half()
model.first_stage_model = vae
devices.dtype = torch.float32 if shared.cmd_opts.no_half else torch.float16
devices.dtype_vae = torch.float32 if shared.cmd_opts.no_half or shared.cmd_opts.no_half_vae else torch.float16
model.first_stage_model.to(devices.dtype_vae)
if shared.opts.sd_checkpoint_cache > 0:
# if PR #4035 were to get merged, restore base VAE first before caching
checkpoints_loaded[checkpoint_key] = model.state_dict().copy()
while len(checkpoints_loaded) > shared.opts.sd_checkpoint_cache:
checkpoints_loaded.popitem(last=False) # LRU
else:
vae_name = sd_vae.get_filename(vae_file) if vae_file else None
vae_message = f" with {vae_name} VAE" if vae_name else ""
print(f"Loading weights [{sd_model_hash}]{vae_message} from cache")
checkpoints_loaded.move_to_end(checkpoint_key)
model.load_state_dict(checkpoints_loaded[checkpoint_key])
model.sd_model_hash = sd_model_hash
model.sd_model_checkpoint = checkpoint_file
model.sd_checkpoint_info = checkpoint_info
sd_vae.load_vae(model, vae_file)
def load_model(checkpoint_info=None):
from modules import lowvram, sd_hijack
checkpoint_info = checkpoint_info or select_checkpoint()
if checkpoint_info.config != shared.cmd_opts.config:
print(f"Loading config from: {checkpoint_info.config}")
if shared.sd_model:
sd_hijack.model_hijack.undo_hijack(shared.sd_model)
shared.sd_model = None
gc.collect()
devices.torch_gc()
sd_config = OmegaConf.load(checkpoint_info.config)
if should_hijack_inpainting(checkpoint_info):
# Hardcoded config for now...
sd_config.model.target = "ldm.models.diffusion.ddpm.LatentInpaintDiffusion"
sd_config.model.params.use_ema = False
sd_config.model.params.conditioning_key = "hybrid"
sd_config.model.params.unet_config.params.in_channels = 9
# Create a "fake" config with a different name so that we know to unload it when switching models.
checkpoint_info = checkpoint_info._replace(config=checkpoint_info.config.replace(".yaml", "-inpainting.yaml"))
do_inpainting_hijack()
sd_model = instantiate_from_config(sd_config.model)
load_model_weights(sd_model, checkpoint_info)
if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
lowvram.setup_for_low_vram(sd_model, shared.cmd_opts.medvram)
else:
sd_model.to(shared.device)
sd_hijack.model_hijack.hijack(sd_model)
sd_model.eval()
shared.sd_model = sd_model
script_callbacks.model_loaded_callback(sd_model)
print(f"Model loaded.")
return sd_model
def reload_model_weights(sd_model=None, info=None):
from modules import lowvram, devices, sd_hijack
checkpoint_info = info or select_checkpoint()
if not sd_model:
sd_model = shared.sd_model
if sd_model.sd_model_checkpoint == checkpoint_info.filename:
return
if sd_model.sd_checkpoint_info.config != checkpoint_info.config or should_hijack_inpainting(checkpoint_info) != should_hijack_inpainting(sd_model.sd_checkpoint_info):
del sd_model
checkpoints_loaded.clear()
load_model(checkpoint_info)
return shared.sd_model
if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
lowvram.send_everything_to_cpu()
else:
sd_model.to(devices.cpu)
sd_hijack.model_hijack.undo_hijack(sd_model)
load_model_weights(sd_model, checkpoint_info)
sd_hijack.model_hijack.hijack(sd_model)
script_callbacks.model_loaded_callback(sd_model)
if not shared.cmd_opts.lowvram and not shared.cmd_opts.medvram:
sd_model.to(devices.device)
print(f"Weights loaded.")
return sd_model
<|code_end|>
|
modules/sd_models.py
<|code_start|>import collections
import os.path
import sys
import gc
from collections import namedtuple
import torch
import re
from omegaconf import OmegaConf
from ldm.util import instantiate_from_config
from modules import shared, modelloader, devices, script_callbacks, sd_vae
from modules.paths import models_path
from modules.sd_hijack_inpainting import do_inpainting_hijack, should_hijack_inpainting
model_dir = "Stable-diffusion"
model_path = os.path.abspath(os.path.join(models_path, model_dir))
CheckpointInfo = namedtuple("CheckpointInfo", ['filename', 'title', 'hash', 'model_name', 'config'])
checkpoints_list = {}
checkpoints_loaded = collections.OrderedDict()
try:
# this silences the annoying "Some weights of the model checkpoint were not used when initializing..." message at start.
from transformers import logging, CLIPModel
logging.set_verbosity_error()
except Exception:
pass
def setup_model():
if not os.path.exists(model_path):
os.makedirs(model_path)
list_models()
def checkpoint_tiles():
convert = lambda name: int(name) if name.isdigit() else name.lower()
alphanumeric_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]
return sorted([x.title for x in checkpoints_list.values()], key = alphanumeric_key)
def list_models():
checkpoints_list.clear()
model_list = modelloader.load_models(model_path=model_path, command_path=shared.cmd_opts.ckpt_dir, ext_filter=[".ckpt"])
def modeltitle(path, shorthash):
abspath = os.path.abspath(path)
if shared.cmd_opts.ckpt_dir is not None and abspath.startswith(shared.cmd_opts.ckpt_dir):
name = abspath.replace(shared.cmd_opts.ckpt_dir, '')
elif abspath.startswith(model_path):
name = abspath.replace(model_path, '')
else:
name = os.path.basename(path)
if name.startswith("\\") or name.startswith("/"):
name = name[1:]
shortname = os.path.splitext(name.replace("/", "_").replace("\\", "_"))[0]
return f'{name} [{shorthash}]', shortname
cmd_ckpt = shared.cmd_opts.ckpt
if os.path.exists(cmd_ckpt):
h = model_hash(cmd_ckpt)
title, short_model_name = modeltitle(cmd_ckpt, h)
checkpoints_list[title] = CheckpointInfo(cmd_ckpt, title, h, short_model_name, shared.cmd_opts.config)
shared.opts.data['sd_model_checkpoint'] = title
elif cmd_ckpt is not None and cmd_ckpt != shared.default_sd_model_file:
print(f"Checkpoint in --ckpt argument not found (Possible it was moved to {model_path}: {cmd_ckpt}", file=sys.stderr)
for filename in model_list:
h = model_hash(filename)
title, short_model_name = modeltitle(filename, h)
basename, _ = os.path.splitext(filename)
config = basename + ".yaml"
if not os.path.exists(config):
config = shared.cmd_opts.config
checkpoints_list[title] = CheckpointInfo(filename, title, h, short_model_name, config)
def get_closet_checkpoint_match(searchString):
applicable = sorted([info for info in checkpoints_list.values() if searchString in info.title], key = lambda x:len(x.title))
if len(applicable) > 0:
return applicable[0]
return None
def model_hash(filename):
try:
with open(filename, "rb") as file:
import hashlib
m = hashlib.sha256()
file.seek(0x100000)
m.update(file.read(0x10000))
return m.hexdigest()[0:8]
except FileNotFoundError:
return 'NOFILE'
def select_checkpoint():
model_checkpoint = shared.opts.sd_model_checkpoint
checkpoint_info = checkpoints_list.get(model_checkpoint, None)
if checkpoint_info is not None:
return checkpoint_info
if len(checkpoints_list) == 0:
print(f"No checkpoints found. When searching for checkpoints, looked at:", file=sys.stderr)
if shared.cmd_opts.ckpt is not None:
print(f" - file {os.path.abspath(shared.cmd_opts.ckpt)}", file=sys.stderr)
print(f" - directory {model_path}", file=sys.stderr)
if shared.cmd_opts.ckpt_dir is not None:
print(f" - directory {os.path.abspath(shared.cmd_opts.ckpt_dir)}", file=sys.stderr)
print(f"Can't run without a checkpoint. Find and place a .ckpt file into any of those locations. The program will exit.", file=sys.stderr)
exit(1)
checkpoint_info = next(iter(checkpoints_list.values()))
if model_checkpoint is not None:
print(f"Checkpoint {model_checkpoint} not found; loading fallback {checkpoint_info.title}", file=sys.stderr)
return checkpoint_info
chckpoint_dict_replacements = {
'cond_stage_model.transformer.embeddings.': 'cond_stage_model.transformer.text_model.embeddings.',
'cond_stage_model.transformer.encoder.': 'cond_stage_model.transformer.text_model.encoder.',
'cond_stage_model.transformer.final_layer_norm.': 'cond_stage_model.transformer.text_model.final_layer_norm.',
}
def transform_checkpoint_dict_key(k):
for text, replacement in chckpoint_dict_replacements.items():
if k.startswith(text):
k = replacement + k[len(text):]
return k
def get_state_dict_from_checkpoint(pl_sd):
if "state_dict" in pl_sd:
pl_sd = pl_sd["state_dict"]
sd = {}
for k, v in pl_sd.items():
new_key = transform_checkpoint_dict_key(k)
if new_key is not None:
sd[new_key] = v
pl_sd.clear()
pl_sd.update(sd)
return pl_sd
def load_model_weights(model, checkpoint_info, vae_file="auto"):
checkpoint_file = checkpoint_info.filename
sd_model_hash = checkpoint_info.hash
if shared.opts.sd_checkpoint_cache > 0 and hasattr(model, "sd_checkpoint_info"):
sd_vae.restore_base_vae(model)
checkpoints_loaded[model.sd_checkpoint_info] = model.state_dict().copy()
if checkpoint_info not in checkpoints_loaded:
print(f"Loading weights [{sd_model_hash}] from {checkpoint_file}")
pl_sd = torch.load(checkpoint_file, map_location=shared.weight_load_location)
if "global_step" in pl_sd:
print(f"Global Step: {pl_sd['global_step']}")
sd = get_state_dict_from_checkpoint(pl_sd)
del pl_sd
model.load_state_dict(sd, strict=False)
del sd
if shared.cmd_opts.opt_channelslast:
model.to(memory_format=torch.channels_last)
if not shared.cmd_opts.no_half:
vae = model.first_stage_model
# with --no-half-vae, remove VAE from model when doing half() to prevent its weights from being converted to float16
if shared.cmd_opts.no_half_vae:
model.first_stage_model = None
model.half()
model.first_stage_model = vae
devices.dtype = torch.float32 if shared.cmd_opts.no_half else torch.float16
devices.dtype_vae = torch.float32 if shared.cmd_opts.no_half or shared.cmd_opts.no_half_vae else torch.float16
model.first_stage_model.to(devices.dtype_vae)
else:
vae_name = sd_vae.get_filename(vae_file) if vae_file else None
vae_message = f" with {vae_name} VAE" if vae_name else ""
print(f"Loading weights [{sd_model_hash}]{vae_message} from cache")
model.load_state_dict(checkpoints_loaded[checkpoint_info])
if shared.opts.sd_checkpoint_cache > 0:
while len(checkpoints_loaded) > shared.opts.sd_checkpoint_cache:
checkpoints_loaded.popitem(last=False) # LRU
model.sd_model_hash = sd_model_hash
model.sd_model_checkpoint = checkpoint_file
model.sd_checkpoint_info = checkpoint_info
sd_vae.load_vae(model, vae_file)
def load_model(checkpoint_info=None):
from modules import lowvram, sd_hijack
checkpoint_info = checkpoint_info or select_checkpoint()
if checkpoint_info.config != shared.cmd_opts.config:
print(f"Loading config from: {checkpoint_info.config}")
if shared.sd_model:
sd_hijack.model_hijack.undo_hijack(shared.sd_model)
shared.sd_model = None
gc.collect()
devices.torch_gc()
sd_config = OmegaConf.load(checkpoint_info.config)
if should_hijack_inpainting(checkpoint_info):
# Hardcoded config for now...
sd_config.model.target = "ldm.models.diffusion.ddpm.LatentInpaintDiffusion"
sd_config.model.params.use_ema = False
sd_config.model.params.conditioning_key = "hybrid"
sd_config.model.params.unet_config.params.in_channels = 9
# Create a "fake" config with a different name so that we know to unload it when switching models.
checkpoint_info = checkpoint_info._replace(config=checkpoint_info.config.replace(".yaml", "-inpainting.yaml"))
do_inpainting_hijack()
sd_model = instantiate_from_config(sd_config.model)
load_model_weights(sd_model, checkpoint_info)
if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
lowvram.setup_for_low_vram(sd_model, shared.cmd_opts.medvram)
else:
sd_model.to(shared.device)
sd_hijack.model_hijack.hijack(sd_model)
sd_model.eval()
shared.sd_model = sd_model
script_callbacks.model_loaded_callback(sd_model)
print(f"Model loaded.")
return sd_model
def reload_model_weights(sd_model=None, info=None):
from modules import lowvram, devices, sd_hijack
checkpoint_info = info or select_checkpoint()
if not sd_model:
sd_model = shared.sd_model
if sd_model.sd_model_checkpoint == checkpoint_info.filename:
return
if sd_model.sd_checkpoint_info.config != checkpoint_info.config or should_hijack_inpainting(checkpoint_info) != should_hijack_inpainting(sd_model.sd_checkpoint_info):
del sd_model
checkpoints_loaded.clear()
load_model(checkpoint_info)
return shared.sd_model
if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
lowvram.send_everything_to_cpu()
else:
sd_model.to(devices.cpu)
sd_hijack.model_hijack.undo_hijack(sd_model)
load_model_weights(sd_model, checkpoint_info)
sd_hijack.model_hijack.hijack(sd_model)
script_callbacks.model_loaded_callback(sd_model)
if not shared.cmd_opts.lowvram and not shared.cmd_opts.medvram:
sd_model.to(devices.device)
print(f"Weights loaded.")
return sd_model
<|code_end|>
|
[Bug]: Cannot disable writing of PNG Info
### Is there an existing issue for this?
- [X] I have searched the existing issues and checked the recent builds/commits
### What happened?
Unchecking the setting "Save text information about generation parameters as chunks to png files" (and applying) no longer prevents this information from being written to the PNG.
### Steps to reproduce the problem
1. Go to Settings
2. Uncheck "Save text information about generation parameters as chunks to png files"
3. Apply settings (also tested after a restart)
4. Generate an image and check with PNG Info and Notepad++; the info is still there.
### What should have happened?
When this option is unchecked, generation parameters should not be written to the PNG.
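For reference, the expected behaviour is that the parameter text chunk is only attached when the option is enabled; a minimal Pillow sketch of that gating (simplified, not the webui's exact save path) would look like:
```python
from PIL import Image, PngImagePlugin

def save_png(image: Image.Image, path: str, parameters: str, enable_pnginfo: bool):
    if enable_pnginfo and parameters:
        pnginfo = PngImagePlugin.PngInfo()
        pnginfo.add_text("parameters", parameters)  # the chunk that "PNG Info" reads back
        image.save(path, format="PNG", pnginfo=pnginfo)
    else:
        image.save(path, format="PNG")              # no generation parameters embedded

# Example (hypothetical values):
# save_png(img, "out.png", "prompt, Steps: 20, CFG scale: 7, Seed: 1", enable_pnginfo=False)
```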
### Commit where the problem happens
Commit hash: 737eb28faca8be2bb996ee0930ec77d1f7ebd939
### What platforms do you use to access UI ?
Windows
### What browsers do you use to access the UI ?
Google Chrome
### Command Line Arguments
_No response_
### Additional information, context and logs
Tested with and without the aesthetic extension and any embeddings.
|
modules/images.py
<|code_start|>import datetime
import sys
import traceback
import pytz
import io
import math
import os
from collections import namedtuple
import re
import numpy as np
import piexif
import piexif.helper
from PIL import Image, ImageFont, ImageDraw, PngImagePlugin
from fonts.ttf import Roboto
import string
from modules import sd_samplers, shared, script_callbacks
from modules.shared import opts, cmd_opts
LANCZOS = (Image.Resampling.LANCZOS if hasattr(Image, 'Resampling') else Image.LANCZOS)
def image_grid(imgs, batch_size=1, rows=None):
if rows is None:
if opts.n_rows > 0:
rows = opts.n_rows
elif opts.n_rows == 0:
rows = batch_size
elif opts.grid_prevent_empty_spots:
rows = math.floor(math.sqrt(len(imgs)))
while len(imgs) % rows != 0:
rows -= 1
else:
rows = math.sqrt(len(imgs))
rows = round(rows)
cols = math.ceil(len(imgs) / rows)
w, h = imgs[0].size
grid = Image.new('RGB', size=(cols * w, rows * h), color='black')
for i, img in enumerate(imgs):
grid.paste(img, box=(i % cols * w, i // cols * h))
return grid
Grid = namedtuple("Grid", ["tiles", "tile_w", "tile_h", "image_w", "image_h", "overlap"])
def split_grid(image, tile_w=512, tile_h=512, overlap=64):
w = image.width
h = image.height
non_overlap_width = tile_w - overlap
non_overlap_height = tile_h - overlap
cols = math.ceil((w - overlap) / non_overlap_width)
rows = math.ceil((h - overlap) / non_overlap_height)
dx = (w - tile_w) / (cols - 1) if cols > 1 else 0
dy = (h - tile_h) / (rows - 1) if rows > 1 else 0
grid = Grid([], tile_w, tile_h, w, h, overlap)
for row in range(rows):
row_images = []
y = int(row * dy)
if y + tile_h >= h:
y = h - tile_h
for col in range(cols):
x = int(col * dx)
if x + tile_w >= w:
x = w - tile_w
tile = image.crop((x, y, x + tile_w, y + tile_h))
row_images.append([x, tile_w, tile])
grid.tiles.append([y, tile_h, row_images])
return grid
def combine_grid(grid):
def make_mask_image(r):
r = r * 255 / grid.overlap
r = r.astype(np.uint8)
return Image.fromarray(r, 'L')
mask_w = make_mask_image(np.arange(grid.overlap, dtype=np.float32).reshape((1, grid.overlap)).repeat(grid.tile_h, axis=0))
mask_h = make_mask_image(np.arange(grid.overlap, dtype=np.float32).reshape((grid.overlap, 1)).repeat(grid.image_w, axis=1))
combined_image = Image.new("RGB", (grid.image_w, grid.image_h))
for y, h, row in grid.tiles:
combined_row = Image.new("RGB", (grid.image_w, h))
for x, w, tile in row:
if x == 0:
combined_row.paste(tile, (0, 0))
continue
combined_row.paste(tile.crop((0, 0, grid.overlap, h)), (x, 0), mask=mask_w)
combined_row.paste(tile.crop((grid.overlap, 0, w, h)), (x + grid.overlap, 0))
if y == 0:
combined_image.paste(combined_row, (0, 0))
continue
combined_image.paste(combined_row.crop((0, 0, combined_row.width, grid.overlap)), (0, y), mask=mask_h)
combined_image.paste(combined_row.crop((0, grid.overlap, combined_row.width, h)), (0, y + grid.overlap))
return combined_image
class GridAnnotation:
def __init__(self, text='', is_active=True):
self.text = text
self.is_active = is_active
self.size = None
def draw_grid_annotations(im, width, height, hor_texts, ver_texts):
def wrap(drawing, text, font, line_length):
lines = ['']
for word in text.split():
line = f'{lines[-1]} {word}'.strip()
if drawing.textlength(line, font=font) <= line_length:
lines[-1] = line
else:
lines.append(word)
return lines
def draw_texts(drawing, draw_x, draw_y, lines):
for i, line in enumerate(lines):
drawing.multiline_text((draw_x, draw_y + line.size[1] / 2), line.text, font=fnt, fill=color_active if line.is_active else color_inactive, anchor="mm", align="center")
if not line.is_active:
drawing.line((draw_x - line.size[0] // 2, draw_y + line.size[1] // 2, draw_x + line.size[0] // 2, draw_y + line.size[1] // 2), fill=color_inactive, width=4)
draw_y += line.size[1] + line_spacing
fontsize = (width + height) // 25
line_spacing = fontsize // 2
try:
fnt = ImageFont.truetype(opts.font or Roboto, fontsize)
except Exception:
fnt = ImageFont.truetype(Roboto, fontsize)
color_active = (0, 0, 0)
color_inactive = (153, 153, 153)
pad_left = 0 if sum([sum([len(line.text) for line in lines]) for lines in ver_texts]) == 0 else width * 3 // 4
cols = im.width // width
rows = im.height // height
assert cols == len(hor_texts), f'bad number of horizontal texts: {len(hor_texts)}; must be {cols}'
assert rows == len(ver_texts), f'bad number of vertical texts: {len(ver_texts)}; must be {rows}'
calc_img = Image.new("RGB", (1, 1), "white")
calc_d = ImageDraw.Draw(calc_img)
for texts, allowed_width in zip(hor_texts + ver_texts, [width] * len(hor_texts) + [pad_left] * len(ver_texts)):
items = [] + texts
texts.clear()
for line in items:
wrapped = wrap(calc_d, line.text, fnt, allowed_width)
texts += [GridAnnotation(x, line.is_active) for x in wrapped]
for line in texts:
bbox = calc_d.multiline_textbbox((0, 0), line.text, font=fnt)
line.size = (bbox[2] - bbox[0], bbox[3] - bbox[1])
hor_text_heights = [sum([line.size[1] + line_spacing for line in lines]) - line_spacing for lines in hor_texts]
ver_text_heights = [sum([line.size[1] + line_spacing for line in lines]) - line_spacing * len(lines) for lines in
ver_texts]
pad_top = max(hor_text_heights) + line_spacing * 2
result = Image.new("RGB", (im.width + pad_left, im.height + pad_top), "white")
result.paste(im, (pad_left, pad_top))
d = ImageDraw.Draw(result)
for col in range(cols):
x = pad_left + width * col + width / 2
y = pad_top / 2 - hor_text_heights[col] / 2
draw_texts(d, x, y, hor_texts[col])
for row in range(rows):
x = pad_left / 2
y = pad_top + height * row + height / 2 - ver_text_heights[row] / 2
draw_texts(d, x, y, ver_texts[row])
return result
def draw_prompt_matrix(im, width, height, all_prompts):
prompts = all_prompts[1:]
boundary = math.ceil(len(prompts) / 2)
prompts_horiz = prompts[:boundary]
prompts_vert = prompts[boundary:]
hor_texts = [[GridAnnotation(x, is_active=pos & (1 << i) != 0) for i, x in enumerate(prompts_horiz)] for pos in range(1 << len(prompts_horiz))]
ver_texts = [[GridAnnotation(x, is_active=pos & (1 << i) != 0) for i, x in enumerate(prompts_vert)] for pos in range(1 << len(prompts_vert))]
return draw_grid_annotations(im, width, height, hor_texts, ver_texts)
def resize_image(resize_mode, im, width, height):
def resize(im, w, h):
if opts.upscaler_for_img2img is None or opts.upscaler_for_img2img == "None" or im.mode == 'L':
return im.resize((w, h), resample=LANCZOS)
scale = max(w / im.width, h / im.height)
if scale > 1.0:
upscalers = [x for x in shared.sd_upscalers if x.name == opts.upscaler_for_img2img]
assert len(upscalers) > 0, f"could not find upscaler named {opts.upscaler_for_img2img}"
upscaler = upscalers[0]
im = upscaler.scaler.upscale(im, scale, upscaler.data_path)
if im.width != w or im.height != h:
im = im.resize((w, h), resample=LANCZOS)
return im
if resize_mode == 0:
res = resize(im, width, height)
elif resize_mode == 1:
ratio = width / height
src_ratio = im.width / im.height
src_w = width if ratio > src_ratio else im.width * height // im.height
src_h = height if ratio <= src_ratio else im.height * width // im.width
resized = resize(im, src_w, src_h)
res = Image.new("RGB", (width, height))
res.paste(resized, box=(width // 2 - src_w // 2, height // 2 - src_h // 2))
else:
ratio = width / height
src_ratio = im.width / im.height
src_w = width if ratio < src_ratio else im.width * height // im.height
src_h = height if ratio >= src_ratio else im.height * width // im.width
resized = resize(im, src_w, src_h)
res = Image.new("RGB", (width, height))
res.paste(resized, box=(width // 2 - src_w // 2, height // 2 - src_h // 2))
if ratio < src_ratio:
fill_height = height // 2 - src_h // 2
res.paste(resized.resize((width, fill_height), box=(0, 0, width, 0)), box=(0, 0))
res.paste(resized.resize((width, fill_height), box=(0, resized.height, width, resized.height)), box=(0, fill_height + src_h))
elif ratio > src_ratio:
fill_width = width // 2 - src_w // 2
res.paste(resized.resize((fill_width, height), box=(0, 0, 0, height)), box=(0, 0))
res.paste(resized.resize((fill_width, height), box=(resized.width, 0, resized.width, height)), box=(fill_width + src_w, 0))
return res
invalid_filename_chars = '<>:"/\\|?*\n'
invalid_filename_prefix = ' '
invalid_filename_postfix = ' .'
re_nonletters = re.compile(r'[\s' + string.punctuation + ']+')
re_pattern = re.compile(r"(.*?)(?:\[([^\[\]]+)\]|$)")
re_pattern_arg = re.compile(r"(.*)<([^>]*)>$")
max_filename_part_length = 128
def sanitize_filename_part(text, replace_spaces=True):
if text is None:
return None
if replace_spaces:
text = text.replace(' ', '_')
text = text.translate({ord(x): '_' for x in invalid_filename_chars})
text = text.lstrip(invalid_filename_prefix)[:max_filename_part_length]
text = text.rstrip(invalid_filename_postfix)
return text
class FilenameGenerator:
replacements = {
'seed': lambda self: self.seed if self.seed is not None else '',
'steps': lambda self: self.p and self.p.steps,
'cfg': lambda self: self.p and self.p.cfg_scale,
'width': lambda self: self.image.width,
'height': lambda self: self.image.height,
'styles': lambda self: self.p and sanitize_filename_part(", ".join([style for style in self.p.styles if not style == "None"]) or "None", replace_spaces=False),
'sampler': lambda self: self.p and sanitize_filename_part(sd_samplers.samplers[self.p.sampler_index].name, replace_spaces=False),
'model_hash': lambda self: getattr(self.p, "sd_model_hash", shared.sd_model.sd_model_hash),
'date': lambda self: datetime.datetime.now().strftime('%Y-%m-%d'),
'datetime': lambda self, *args: self.datetime(*args), # accepts formats: [datetime], [datetime<Format>], [datetime<Format><Time Zone>]
'job_timestamp': lambda self: getattr(self.p, "job_timestamp", shared.state.job_timestamp),
'prompt': lambda self: sanitize_filename_part(self.prompt),
'prompt_no_styles': lambda self: self.prompt_no_style(),
'prompt_spaces': lambda self: sanitize_filename_part(self.prompt, replace_spaces=False),
'prompt_words': lambda self: self.prompt_words(),
}
default_time_format = '%Y%m%d%H%M%S'
def __init__(self, p, seed, prompt, image):
self.p = p
self.seed = seed
self.prompt = prompt
self.image = image
def prompt_no_style(self):
if self.p is None or self.prompt is None:
return None
prompt_no_style = self.prompt
for style in shared.prompt_styles.get_style_prompts(self.p.styles):
if len(style) > 0:
for part in style.split("{prompt}"):
prompt_no_style = prompt_no_style.replace(part, "").replace(", ,", ",").strip().strip(',')
prompt_no_style = prompt_no_style.replace(style, "").strip().strip(',').strip()
return sanitize_filename_part(prompt_no_style, replace_spaces=False)
def prompt_words(self):
words = [x for x in re_nonletters.split(self.prompt or "") if len(x) > 0]
if len(words) == 0:
words = ["empty"]
return sanitize_filename_part(" ".join(words[0:opts.directories_max_prompt_words]), replace_spaces=False)
def datetime(self, *args):
time_datetime = datetime.datetime.now()
time_format = args[0] if len(args) > 0 and args[0] != "" else self.default_time_format
try:
time_zone = pytz.timezone(args[1]) if len(args) > 1 else None
except pytz.exceptions.UnknownTimeZoneError as _:
time_zone = None
time_zone_time = time_datetime.astimezone(time_zone)
try:
formatted_time = time_zone_time.strftime(time_format)
except (ValueError, TypeError) as _:
formatted_time = time_zone_time.strftime(self.default_time_format)
return sanitize_filename_part(formatted_time, replace_spaces=False)
def apply(self, x):
res = ''
for m in re_pattern.finditer(x):
text, pattern = m.groups()
res += text
if pattern is None:
continue
pattern_args = []
while True:
m = re_pattern_arg.match(pattern)
if m is None:
break
pattern, arg = m.groups()
pattern_args.insert(0, arg)
fun = self.replacements.get(pattern.lower())
if fun is not None:
try:
replacement = fun(self, *pattern_args)
except Exception:
replacement = None
print(f"Error adding [{pattern}] to filename", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
if replacement is not None:
res += str(replacement)
continue
res += f'[{pattern}]'
return res
def get_next_sequence_number(path, basename):
"""
Determines and returns the next sequence number to use when saving an image in the specified directory.
The sequence starts at 0.
"""
result = -1
if basename != '':
basename = basename + "-"
prefix_length = len(basename)
for p in os.listdir(path):
if p.startswith(basename):
l = os.path.splitext(p[prefix_length:])[0].split('-') # splits the filename (removing the basename first if one is defined, so the sequence number is always the first element)
try:
result = max(int(l[0]), result)
except ValueError:
pass
return result + 1
def save_image(image, path, basename, seed=None, prompt=None, extension='png', info=None, short_filename=False, no_prompt=False, grid=False, pnginfo_section_name='parameters', p=None, existing_info=None, forced_filename=None, suffix="", save_to_dirs=None):
"""Save an image.
Args:
image (`PIL.Image`):
The image to be saved.
path (`str`):
The directory to save the image. Note, the option `save_to_dirs` will make the image to be saved into a sub directory.
basename (`str`):
The base filename which will be applied to `filename pattern`.
seed, prompt, short_filename,
extension (`str`):
Image file extension, default is `png`.
pnginfo_section_name (`str`):
Specify the name of the section in which `info` will be saved.
info (`str` or `PngImagePlugin.iTXt`):
PNG info chunks.
existing_info (`dict`):
Additional PNG info. `existing_info == {pnginfo_section_name: info, ...}`
no_prompt:
If True, non-grid images are not saved into a prompt-derived subdirectory (overrides `opts.save_to_dirs` for this call).
p (`StableDiffusionProcessing`)
forced_filename (`str`):
If specified, `basename` and filename pattern will be ignored.
save_to_dirs (bool):
If true, the image will be saved into a subdirectory of `path`.
Returns: (fullfn, txt_fullfn)
fullfn (`str`):
The full path of the saved image.
txt_fullfn (`str` or None):
If a text file is saved for this image, this will be its full path. Otherwise None.
"""
namegen = FilenameGenerator(p, seed, prompt, image)
if save_to_dirs is None:
save_to_dirs = (grid and opts.grid_save_to_dirs) or (not grid and opts.save_to_dirs and not no_prompt)
if save_to_dirs:
dirname = namegen.apply(opts.directories_filename_pattern or "[prompt_words]").lstrip(' ').rstrip('\\ /')
path = os.path.join(path, dirname)
os.makedirs(path, exist_ok=True)
if forced_filename is None:
if short_filename or seed is None:
file_decoration = ""
elif opts.save_to_dirs:
file_decoration = opts.samples_filename_pattern or "[seed]"
else:
file_decoration = opts.samples_filename_pattern or "[seed]-[prompt_spaces]"
add_number = opts.save_images_add_number or file_decoration == ''
if file_decoration != "" and add_number:
file_decoration = "-" + file_decoration
file_decoration = namegen.apply(file_decoration) + suffix
if add_number:
basecount = get_next_sequence_number(path, basename)
fullfn = None
for i in range(500):
fn = f"{basecount + i:05}" if basename == '' else f"{basename}-{basecount + i:04}"
fullfn = os.path.join(path, f"{fn}{file_decoration}.{extension}")
if not os.path.exists(fullfn):
break
else:
fullfn = os.path.join(path, f"{file_decoration}.{extension}")
else:
fullfn = os.path.join(path, f"{forced_filename}.{extension}")
pnginfo = existing_info or {}
if info is not None:
pnginfo[pnginfo_section_name] = info
params = script_callbacks.ImageSaveParams(image, p, fullfn, pnginfo)
script_callbacks.before_image_saved_callback(params)
image = params.image
fullfn = params.filename
info = params.pnginfo.get(pnginfo_section_name, None)
fullfn_without_extension, extension = os.path.splitext(params.filename)
def exif_bytes():
return piexif.dump({
"Exif": {
piexif.ExifIFD.UserComment: piexif.helper.UserComment.dump(info or "", encoding="unicode")
},
})
if extension.lower() == '.png':
pnginfo_data = PngImagePlugin.PngInfo()
for k, v in params.pnginfo.items():
pnginfo_data.add_text(k, str(v))
image.save(fullfn, quality=opts.jpeg_quality, pnginfo=pnginfo_data)
elif extension.lower() in (".jpg", ".jpeg", ".webp"):
image.save(fullfn, quality=opts.jpeg_quality)
if opts.enable_pnginfo and info is not None:
piexif.insert(exif_bytes(), fullfn)
else:
image.save(fullfn, quality=opts.jpeg_quality)
target_side_length = 4000
oversize = image.width > target_side_length or image.height > target_side_length
if opts.export_for_4chan and (oversize or os.stat(fullfn).st_size > 4 * 1024 * 1024):
ratio = image.width / image.height
if oversize and ratio > 1:
image = image.resize((target_side_length, image.height * target_side_length // image.width), LANCZOS)
elif oversize:
image = image.resize((image.width * target_side_length // image.height, target_side_length), LANCZOS)
image.save(fullfn_without_extension + ".jpg", quality=opts.jpeg_quality)
if opts.enable_pnginfo and info is not None:
piexif.insert(exif_bytes(), fullfn_without_extension + ".jpg")
if opts.save_txt and info is not None:
txt_fullfn = f"{fullfn_without_extension}.txt"
with open(txt_fullfn, "w", encoding="utf8") as file:
file.write(info + "\n")
else:
txt_fullfn = None
script_callbacks.image_saved_callback(params)
return fullfn, txt_fullfn
def image_data(data):
try:
image = Image.open(io.BytesIO(data))
textinfo = image.text["parameters"]
return textinfo, None
except Exception:
pass
try:
text = data.decode('utf8')
assert len(text) < 10000
return text, None
except Exception:
pass
return '', None
<|code_end|>
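The `FilenameGenerator` class above defines a small bracketed pattern language for filenames, including the `[datetime<Format><Time Zone>]` form noted in its code comment. The following is a minimal sketch of how `apply` expands such patterns; it assumes the webui repository is on the import path, and uses `None` for the processing object and image because the tags shown here do not need them:

```python
# Sketch only: expanding filename patterns with FilenameGenerator.apply.
# Assumes the webui repo is importable; p/image are None because [seed],
# [prompt_spaces] and [datetime<...>] do not read them.
from modules.images import FilenameGenerator

namegen = FilenameGenerator(None, 12345, "a caricature by artist", None)

print(namegen.apply("[seed]-[prompt_spaces]"))
# -> "12345-a caricature by artist"

print(namegen.apply("[datetime<%Y-%m-%d>]-[seed]"))
# -> e.g. "2022-11-05-12345"; unknown tags are kept literally as "[tag]"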
|
modules/images.py
<|code_start|>import datetime
import sys
import traceback
import pytz
import io
import math
import os
from collections import namedtuple
import re
import numpy as np
import piexif
import piexif.helper
from PIL import Image, ImageFont, ImageDraw, PngImagePlugin
from fonts.ttf import Roboto
import string
from modules import sd_samplers, shared, script_callbacks
from modules.shared import opts, cmd_opts
LANCZOS = (Image.Resampling.LANCZOS if hasattr(Image, 'Resampling') else Image.LANCZOS)
def image_grid(imgs, batch_size=1, rows=None):
if rows is None:
if opts.n_rows > 0:
rows = opts.n_rows
elif opts.n_rows == 0:
rows = batch_size
elif opts.grid_prevent_empty_spots:
rows = math.floor(math.sqrt(len(imgs)))
while len(imgs) % rows != 0:
rows -= 1
else:
rows = math.sqrt(len(imgs))
rows = round(rows)
cols = math.ceil(len(imgs) / rows)
w, h = imgs[0].size
grid = Image.new('RGB', size=(cols * w, rows * h), color='black')
for i, img in enumerate(imgs):
grid.paste(img, box=(i % cols * w, i // cols * h))
return grid
Grid = namedtuple("Grid", ["tiles", "tile_w", "tile_h", "image_w", "image_h", "overlap"])
def split_grid(image, tile_w=512, tile_h=512, overlap=64):
w = image.width
h = image.height
non_overlap_width = tile_w - overlap
non_overlap_height = tile_h - overlap
cols = math.ceil((w - overlap) / non_overlap_width)
rows = math.ceil((h - overlap) / non_overlap_height)
dx = (w - tile_w) / (cols - 1) if cols > 1 else 0
dy = (h - tile_h) / (rows - 1) if rows > 1 else 0
grid = Grid([], tile_w, tile_h, w, h, overlap)
for row in range(rows):
row_images = []
y = int(row * dy)
if y + tile_h >= h:
y = h - tile_h
for col in range(cols):
x = int(col * dx)
if x + tile_w >= w:
x = w - tile_w
tile = image.crop((x, y, x + tile_w, y + tile_h))
row_images.append([x, tile_w, tile])
grid.tiles.append([y, tile_h, row_images])
return grid
def combine_grid(grid):
def make_mask_image(r):
r = r * 255 / grid.overlap
r = r.astype(np.uint8)
return Image.fromarray(r, 'L')
mask_w = make_mask_image(np.arange(grid.overlap, dtype=np.float32).reshape((1, grid.overlap)).repeat(grid.tile_h, axis=0))
mask_h = make_mask_image(np.arange(grid.overlap, dtype=np.float32).reshape((grid.overlap, 1)).repeat(grid.image_w, axis=1))
combined_image = Image.new("RGB", (grid.image_w, grid.image_h))
for y, h, row in grid.tiles:
combined_row = Image.new("RGB", (grid.image_w, h))
for x, w, tile in row:
if x == 0:
combined_row.paste(tile, (0, 0))
continue
combined_row.paste(tile.crop((0, 0, grid.overlap, h)), (x, 0), mask=mask_w)
combined_row.paste(tile.crop((grid.overlap, 0, w, h)), (x + grid.overlap, 0))
if y == 0:
combined_image.paste(combined_row, (0, 0))
continue
combined_image.paste(combined_row.crop((0, 0, combined_row.width, grid.overlap)), (0, y), mask=mask_h)
combined_image.paste(combined_row.crop((0, grid.overlap, combined_row.width, h)), (0, y + grid.overlap))
return combined_image
class GridAnnotation:
def __init__(self, text='', is_active=True):
self.text = text
self.is_active = is_active
self.size = None
def draw_grid_annotations(im, width, height, hor_texts, ver_texts):
def wrap(drawing, text, font, line_length):
lines = ['']
for word in text.split():
line = f'{lines[-1]} {word}'.strip()
if drawing.textlength(line, font=font) <= line_length:
lines[-1] = line
else:
lines.append(word)
return lines
def draw_texts(drawing, draw_x, draw_y, lines):
for i, line in enumerate(lines):
drawing.multiline_text((draw_x, draw_y + line.size[1] / 2), line.text, font=fnt, fill=color_active if line.is_active else color_inactive, anchor="mm", align="center")
if not line.is_active:
drawing.line((draw_x - line.size[0] // 2, draw_y + line.size[1] // 2, draw_x + line.size[0] // 2, draw_y + line.size[1] // 2), fill=color_inactive, width=4)
draw_y += line.size[1] + line_spacing
fontsize = (width + height) // 25
line_spacing = fontsize // 2
try:
fnt = ImageFont.truetype(opts.font or Roboto, fontsize)
except Exception:
fnt = ImageFont.truetype(Roboto, fontsize)
color_active = (0, 0, 0)
color_inactive = (153, 153, 153)
pad_left = 0 if sum([sum([len(line.text) for line in lines]) for lines in ver_texts]) == 0 else width * 3 // 4
cols = im.width // width
rows = im.height // height
assert cols == len(hor_texts), f'bad number of horizontal texts: {len(hor_texts)}; must be {cols}'
assert rows == len(ver_texts), f'bad number of vertical texts: {len(ver_texts)}; must be {rows}'
calc_img = Image.new("RGB", (1, 1), "white")
calc_d = ImageDraw.Draw(calc_img)
for texts, allowed_width in zip(hor_texts + ver_texts, [width] * len(hor_texts) + [pad_left] * len(ver_texts)):
items = [] + texts
texts.clear()
for line in items:
wrapped = wrap(calc_d, line.text, fnt, allowed_width)
texts += [GridAnnotation(x, line.is_active) for x in wrapped]
for line in texts:
bbox = calc_d.multiline_textbbox((0, 0), line.text, font=fnt)
line.size = (bbox[2] - bbox[0], bbox[3] - bbox[1])
hor_text_heights = [sum([line.size[1] + line_spacing for line in lines]) - line_spacing for lines in hor_texts]
ver_text_heights = [sum([line.size[1] + line_spacing for line in lines]) - line_spacing * len(lines) for lines in
ver_texts]
pad_top = max(hor_text_heights) + line_spacing * 2
result = Image.new("RGB", (im.width + pad_left, im.height + pad_top), "white")
result.paste(im, (pad_left, pad_top))
d = ImageDraw.Draw(result)
for col in range(cols):
x = pad_left + width * col + width / 2
y = pad_top / 2 - hor_text_heights[col] / 2
draw_texts(d, x, y, hor_texts[col])
for row in range(rows):
x = pad_left / 2
y = pad_top + height * row + height / 2 - ver_text_heights[row] / 2
draw_texts(d, x, y, ver_texts[row])
return result
def draw_prompt_matrix(im, width, height, all_prompts):
prompts = all_prompts[1:]
boundary = math.ceil(len(prompts) / 2)
prompts_horiz = prompts[:boundary]
prompts_vert = prompts[boundary:]
hor_texts = [[GridAnnotation(x, is_active=pos & (1 << i) != 0) for i, x in enumerate(prompts_horiz)] for pos in range(1 << len(prompts_horiz))]
ver_texts = [[GridAnnotation(x, is_active=pos & (1 << i) != 0) for i, x in enumerate(prompts_vert)] for pos in range(1 << len(prompts_vert))]
return draw_grid_annotations(im, width, height, hor_texts, ver_texts)
def resize_image(resize_mode, im, width, height):
def resize(im, w, h):
if opts.upscaler_for_img2img is None or opts.upscaler_for_img2img == "None" or im.mode == 'L':
return im.resize((w, h), resample=LANCZOS)
scale = max(w / im.width, h / im.height)
if scale > 1.0:
upscalers = [x for x in shared.sd_upscalers if x.name == opts.upscaler_for_img2img]
assert len(upscalers) > 0, f"could not find upscaler named {opts.upscaler_for_img2img}"
upscaler = upscalers[0]
im = upscaler.scaler.upscale(im, scale, upscaler.data_path)
if im.width != w or im.height != h:
im = im.resize((w, h), resample=LANCZOS)
return im
if resize_mode == 0:
res = resize(im, width, height)
elif resize_mode == 1:
ratio = width / height
src_ratio = im.width / im.height
src_w = width if ratio > src_ratio else im.width * height // im.height
src_h = height if ratio <= src_ratio else im.height * width // im.width
resized = resize(im, src_w, src_h)
res = Image.new("RGB", (width, height))
res.paste(resized, box=(width // 2 - src_w // 2, height // 2 - src_h // 2))
else:
ratio = width / height
src_ratio = im.width / im.height
src_w = width if ratio < src_ratio else im.width * height // im.height
src_h = height if ratio >= src_ratio else im.height * width // im.width
resized = resize(im, src_w, src_h)
res = Image.new("RGB", (width, height))
res.paste(resized, box=(width // 2 - src_w // 2, height // 2 - src_h // 2))
if ratio < src_ratio:
fill_height = height // 2 - src_h // 2
res.paste(resized.resize((width, fill_height), box=(0, 0, width, 0)), box=(0, 0))
res.paste(resized.resize((width, fill_height), box=(0, resized.height, width, resized.height)), box=(0, fill_height + src_h))
elif ratio > src_ratio:
fill_width = width // 2 - src_w // 2
res.paste(resized.resize((fill_width, height), box=(0, 0, 0, height)), box=(0, 0))
res.paste(resized.resize((fill_width, height), box=(resized.width, 0, resized.width, height)), box=(fill_width + src_w, 0))
return res
invalid_filename_chars = '<>:"/\\|?*\n'
invalid_filename_prefix = ' '
invalid_filename_postfix = ' .'
re_nonletters = re.compile(r'[\s' + string.punctuation + ']+')
re_pattern = re.compile(r"(.*?)(?:\[([^\[\]]+)\]|$)")
re_pattern_arg = re.compile(r"(.*)<([^>]*)>$")
max_filename_part_length = 128
def sanitize_filename_part(text, replace_spaces=True):
if text is None:
return None
if replace_spaces:
text = text.replace(' ', '_')
text = text.translate({ord(x): '_' for x in invalid_filename_chars})
text = text.lstrip(invalid_filename_prefix)[:max_filename_part_length]
text = text.rstrip(invalid_filename_postfix)
return text
class FilenameGenerator:
replacements = {
'seed': lambda self: self.seed if self.seed is not None else '',
'steps': lambda self: self.p and self.p.steps,
'cfg': lambda self: self.p and self.p.cfg_scale,
'width': lambda self: self.image.width,
'height': lambda self: self.image.height,
'styles': lambda self: self.p and sanitize_filename_part(", ".join([style for style in self.p.styles if not style == "None"]) or "None", replace_spaces=False),
'sampler': lambda self: self.p and sanitize_filename_part(sd_samplers.samplers[self.p.sampler_index].name, replace_spaces=False),
'model_hash': lambda self: getattr(self.p, "sd_model_hash", shared.sd_model.sd_model_hash),
'date': lambda self: datetime.datetime.now().strftime('%Y-%m-%d'),
'datetime': lambda self, *args: self.datetime(*args), # accepts formats: [datetime], [datetime<Format>], [datetime<Format><Time Zone>]
'job_timestamp': lambda self: getattr(self.p, "job_timestamp", shared.state.job_timestamp),
'prompt': lambda self: sanitize_filename_part(self.prompt),
'prompt_no_styles': lambda self: self.prompt_no_style(),
'prompt_spaces': lambda self: sanitize_filename_part(self.prompt, replace_spaces=False),
'prompt_words': lambda self: self.prompt_words(),
}
default_time_format = '%Y%m%d%H%M%S'
def __init__(self, p, seed, prompt, image):
self.p = p
self.seed = seed
self.prompt = prompt
self.image = image
def prompt_no_style(self):
if self.p is None or self.prompt is None:
return None
prompt_no_style = self.prompt
for style in shared.prompt_styles.get_style_prompts(self.p.styles):
if len(style) > 0:
for part in style.split("{prompt}"):
prompt_no_style = prompt_no_style.replace(part, "").replace(", ,", ",").strip().strip(',')
prompt_no_style = prompt_no_style.replace(style, "").strip().strip(',').strip()
return sanitize_filename_part(prompt_no_style, replace_spaces=False)
def prompt_words(self):
words = [x for x in re_nonletters.split(self.prompt or "") if len(x) > 0]
if len(words) == 0:
words = ["empty"]
return sanitize_filename_part(" ".join(words[0:opts.directories_max_prompt_words]), replace_spaces=False)
def datetime(self, *args):
time_datetime = datetime.datetime.now()
time_format = args[0] if len(args) > 0 and args[0] != "" else self.default_time_format
try:
time_zone = pytz.timezone(args[1]) if len(args) > 1 else None
except pytz.exceptions.UnknownTimeZoneError as _:
time_zone = None
time_zone_time = time_datetime.astimezone(time_zone)
try:
formatted_time = time_zone_time.strftime(time_format)
except (ValueError, TypeError) as _:
formatted_time = time_zone_time.strftime(self.default_time_format)
return sanitize_filename_part(formatted_time, replace_spaces=False)
def apply(self, x):
res = ''
for m in re_pattern.finditer(x):
text, pattern = m.groups()
res += text
if pattern is None:
continue
pattern_args = []
while True:
m = re_pattern_arg.match(pattern)
if m is None:
break
pattern, arg = m.groups()
pattern_args.insert(0, arg)
fun = self.replacements.get(pattern.lower())
if fun is not None:
try:
replacement = fun(self, *pattern_args)
except Exception:
replacement = None
print(f"Error adding [{pattern}] to filename", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
if replacement is not None:
res += str(replacement)
continue
res += f'[{pattern}]'
return res
def get_next_sequence_number(path, basename):
"""
Determines and returns the next sequence number to use when saving an image in the specified directory.
The sequence starts at 0.
"""
result = -1
if basename != '':
basename = basename + "-"
prefix_length = len(basename)
for p in os.listdir(path):
if p.startswith(basename):
l = os.path.splitext(p[prefix_length:])[0].split('-') # splits the filename (removing the basename first if one is defined, so the sequence number is always the first element)
try:
result = max(int(l[0]), result)
except ValueError:
pass
return result + 1
def save_image(image, path, basename, seed=None, prompt=None, extension='png', info=None, short_filename=False, no_prompt=False, grid=False, pnginfo_section_name='parameters', p=None, existing_info=None, forced_filename=None, suffix="", save_to_dirs=None):
"""Save an image.
Args:
image (`PIL.Image`):
The image to be saved.
path (`str`):
The directory to save the image. Note, the option `save_to_dirs` will make the image to be saved into a sub directory.
basename (`str`):
The base filename which will be applied to `filename pattern`.
seed, prompt, short_filename,
extension (`str`):
Image file extension, default is `png`.
pnginfo_section_name (`str`):
Specify the name of the section in which `info` will be saved.
info (`str` or `PngImagePlugin.iTXt`):
PNG info chunks.
existing_info (`dict`):
Additional PNG info. `existing_info == {pnginfo_section_name: info, ...}`
no_prompt:
If True, non-grid images are not saved into a prompt-derived subdirectory (overrides `opts.save_to_dirs` for this call).
p (`StableDiffusionProcessing`)
forced_filename (`str`):
If specified, `basename` and filename pattern will be ignored.
save_to_dirs (bool):
If true, the image will be saved into a subdirectory of `path`.
Returns: (fullfn, txt_fullfn)
fullfn (`str`):
The full path of the saved image.
txt_fullfn (`str` or None):
If a text file is saved for this image, this will be its full path. Otherwise None.
"""
namegen = FilenameGenerator(p, seed, prompt, image)
if save_to_dirs is None:
save_to_dirs = (grid and opts.grid_save_to_dirs) or (not grid and opts.save_to_dirs and not no_prompt)
if save_to_dirs:
dirname = namegen.apply(opts.directories_filename_pattern or "[prompt_words]").lstrip(' ').rstrip('\\ /')
path = os.path.join(path, dirname)
os.makedirs(path, exist_ok=True)
if forced_filename is None:
if short_filename or seed is None:
file_decoration = ""
elif opts.save_to_dirs:
file_decoration = opts.samples_filename_pattern or "[seed]"
else:
file_decoration = opts.samples_filename_pattern or "[seed]-[prompt_spaces]"
add_number = opts.save_images_add_number or file_decoration == ''
if file_decoration != "" and add_number:
file_decoration = "-" + file_decoration
file_decoration = namegen.apply(file_decoration) + suffix
if add_number:
basecount = get_next_sequence_number(path, basename)
fullfn = None
for i in range(500):
fn = f"{basecount + i:05}" if basename == '' else f"{basename}-{basecount + i:04}"
fullfn = os.path.join(path, f"{fn}{file_decoration}.{extension}")
if not os.path.exists(fullfn):
break
else:
fullfn = os.path.join(path, f"{file_decoration}.{extension}")
else:
fullfn = os.path.join(path, f"{forced_filename}.{extension}")
pnginfo = existing_info or {}
if info is not None:
pnginfo[pnginfo_section_name] = info
params = script_callbacks.ImageSaveParams(image, p, fullfn, pnginfo)
script_callbacks.before_image_saved_callback(params)
image = params.image
fullfn = params.filename
info = params.pnginfo.get(pnginfo_section_name, None)
fullfn_without_extension, extension = os.path.splitext(params.filename)
def exif_bytes():
return piexif.dump({
"Exif": {
piexif.ExifIFD.UserComment: piexif.helper.UserComment.dump(info or "", encoding="unicode")
},
})
if extension.lower() == '.png':
pnginfo_data = PngImagePlugin.PngInfo()
if opts.enable_pnginfo:
for k, v in params.pnginfo.items():
pnginfo_data.add_text(k, str(v))
image.save(fullfn, quality=opts.jpeg_quality, pnginfo=pnginfo_data)
elif extension.lower() in (".jpg", ".jpeg", ".webp"):
image.save(fullfn, quality=opts.jpeg_quality)
if opts.enable_pnginfo and info is not None:
piexif.insert(exif_bytes(), fullfn)
else:
image.save(fullfn, quality=opts.jpeg_quality)
target_side_length = 4000
oversize = image.width > target_side_length or image.height > target_side_length
if opts.export_for_4chan and (oversize or os.stat(fullfn).st_size > 4 * 1024 * 1024):
ratio = image.width / image.height
if oversize and ratio > 1:
image = image.resize((target_side_length, image.height * target_side_length // image.width), LANCZOS)
elif oversize:
image = image.resize((image.width * target_side_length // image.height, target_side_length), LANCZOS)
image.save(fullfn_without_extension + ".jpg", quality=opts.jpeg_quality)
if opts.enable_pnginfo and info is not None:
piexif.insert(exif_bytes(), fullfn_without_extension + ".jpg")
if opts.save_txt and info is not None:
txt_fullfn = f"{fullfn_without_extension}.txt"
with open(txt_fullfn, "w", encoding="utf8") as file:
file.write(info + "\n")
else:
txt_fullfn = None
script_callbacks.image_saved_callback(params)
return fullfn, txt_fullfn
def image_data(data):
try:
image = Image.open(io.BytesIO(data))
textinfo = image.text["parameters"]
return textinfo, None
except Exception:
pass
try:
text = data.decode('utf8')
assert len(text) < 10000
return text, None
except Exception:
pass
return '', None
<|code_end|>
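The `save_image` docstring above describes the saving entry point used throughout the webui. Below is a hedged usage sketch; it only works inside a running webui session (for example from an extension or script), because `save_image` reads several `opts` values such as the filename pattern and numbering options, and the output directory name here is hypothetical:

```python
# Minimal usage sketch for save_image; requires the webui environment so that
# opts (samples_filename_pattern, save_images_add_number, ...) is populated.
from PIL import Image
from modules.images import save_image

img = Image.new("RGB", (512, 512), "black")
fullfn, txt_fullfn = save_image(
    img,
    path="outputs/txt2img-images",          # hypothetical output directory
    basename="",
    seed=12345,
    prompt="a caricature, art by artist",
    extension="png",
    info="a caricature, art by artist\nSteps: 20, Seed: 12345",  # written to the 'parameters' PNG chunk when opts.enable_pnginfo is on
)
print(fullfn)       # full path of the saved image
print(txt_fullfn)   # path of the .txt sidecar, or None if opts.save_txt is off
```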
|
[Bug]: Torch.cuda error during Textual Inversion training
### Is there an existing issue for this?
- [X] I have searched the existing issues and checked the recent builds/commits
### What happened?
Something is not working 100% for me either: when I copy a previously saved step back and then resume training, I get torch errors. After that, neither the preview nor the progress updates in the UI. I stopped the training and resumed it; it then ran fine for a while, but the error sometimes reappeared and sometimes did not. I do not use the `--xformers --medvram --precision full --no-half` options. My card is an RTX 3060 12GB.
Traceback (most recent call last): | 260/20000 [06:55<1:16:29, 4.30it/s]
File "H:\Stable-Diffusion-Automatic\stable-diffusion-webui\venv\lib\site-packages\gradio\routes.py", line 275, in run_predict
output = await app.blocks.process_api(
File "H:\Stable-Diffusion-Automatic\stable-diffusion-webui\venv\lib\site-packages\gradio\blocks.py", line 787, in process_api
result = await self.call_function(fn_index, inputs, iterator)
File "H:\Stable-Diffusion-Automatic\stable-diffusion-webui\venv\lib\site-packages\gradio\blocks.py", line 694, in call_function
prediction = await anyio.to_thread.run_sync(
File "H:\Stable-Diffusion-Automatic\stable-diffusion-webui\venv\lib\site-packages\anyio\to_thread.py", line 31, in run_sync
return await get_asynclib().run_sync_in_worker_thread(
File "H:\Stable-Diffusion-Automatic\stable-diffusion-webui\venv\lib\site-packages\anyio\_backends\_asyncio.py", line 937, in run_sync_in_worker_thread
return await future
File "H:\Stable-Diffusion-Automatic\stable-diffusion-webui\venv\lib\site-packages\anyio\_backends\_asyncio.py", line 867, in run
result = context.run(func, *args)
File "H:\Stable-Diffusion-Automatic\stable-diffusion-webui\modules\ui.py", line 526, in <lambda>
fn=lambda: check_progress_call(id_part),
File "H:\Stable-Diffusion-Automatic\stable-diffusion-webui\modules\ui.py", line 284, in check_progress_call
shared.state.current_image = modules.sd_samplers.samples_to_image_grid(shared.state.current_latent)
File "H:\Stable-Diffusion-Automatic\stable-diffusion-webui\modules\sd_samplers.py", line 100, in samples_to_image_grid
return images.image_grid([single_sample_to_image(sample) for sample in samples])
File "H:\Stable-Diffusion-Automatic\stable-diffusion-webui\modules\sd_samplers.py", line 100, in <listcomp>
return images.image_grid([single_sample_to_image(sample) for sample in samples])
File "H:\Stable-Diffusion-Automatic\stable-diffusion-webui\modules\sd_samplers.py", line 88, in single_sample_to_image
x_sample = processing.decode_first_stage(shared.sd_model, sample.unsqueeze(0))[0]
File "H:\Stable-Diffusion-Automatic\stable-diffusion-webui\modules\processing.py", line 367, in decode_first_stage
x = model.decode_first_stage(x)
File "H:\Stable-Diffusion-Automatic\stable-diffusion-webui\venv\lib\site-packages\torch\autograd\grad_mode.py", line 27, in decorate_context
return func(*args, **kwargs)
File "H:\Stable-Diffusion-Automatic\stable-diffusion-webui\repositories\stable-diffusion\ldm\models\diffusion\ddpm.py", line 763, in decode_first_stage
return self.first_stage_model.decode(z)
File "H:\Stable-Diffusion-Automatic\stable-diffusion-webui\repositories\stable-diffusion\ldm\models\autoencoder.py", line 332, in decode
dec = self.decoder(z)
File "H:\Stable-Diffusion-Automatic\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1130, in _call_impl
return forward_call(*input, **kwargs)
File "H:\Stable-Diffusion-Automatic\stable-diffusion-webui\repositories\stable-diffusion\ldm\modules\diffusionmodules\model.py", line 553, in forward
h = self.up[i_level].block[i_block](h, temb)
File "H:\Stable-Diffusion-Automatic\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1130, in _call_impl
return forward_call(*input, **kwargs)
File "H:\Stable-Diffusion-Automatic\stable-diffusion-webui\repositories\stable-diffusion\ldm\modules\diffusionmodules\model.py", line 125, in forward
h = self.conv1(h)
File "H:\Stable-Diffusion-Automatic\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1130, in _call_impl
return forward_call(*input, **kwargs)
File "H:\Stable-Diffusion-Automatic\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\conv.py", line 457, in forward
return self._conv_forward(input, self.weight, self.bias)
File "H:\Stable-Diffusion-Automatic\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\conv.py", line 453, in _conv_forward
return F.conv2d(input, weight, bias, self.stride,
RuntimeError: Input type (torch.cuda.HalfTensor) and weight type (torch.HalfTensor) should be the same
### Steps to reproduce the problem
1. Create a Textual Inversion embedding with 4 vectors and 18 images; initialization text: caricature; image size: 512x512; learning rate: 5e-04:200, 5e-05:500, 5e-06:800, 5e-07:1000; max steps: 1000; preview and embedding saved every 50th step
2. Train the embedding
3. If you did not get an error, try training again with a different embedding name
### What should have happened?
In previous versions there was no error during training
### Commit where the problem happens
198a1ffcfc963a3d74674fad560e87dbebf7949f
### What platforms do you use to access UI ?
Windows
### What browsers do you use to access the UI ?
Google Chrome
### Command Line Arguments
```Shell
--ui-config-file 1my/ui-config-my.json --ui-settings-file 1my/config-my.json --autolaunch --gradio-img2img-tool color-sketch --vae-path "models\Stable-diffusion\newVAE.vae.pt"
```
### Additional information, context and logs
My train file content here:
```
a caricature art by [name]
a caricature, art by [name]
a caricature by [name]
art by [name]
```
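The `RuntimeError` in the traceback (`torch.cuda.HalfTensor` input vs `torch.HalfTensor` weight) means the latent being decoded is on the GPU while the module's weights are on the CPU. The sketch below reproduces that mismatch in isolation with plain PyTorch (it is not webui code and is not the fix for this issue; a CUDA-capable machine is assumed):

```python
# Stand-alone reproduction of the kind of error shown in the traceback above
# (plain PyTorch; assumes CUDA is available).
import torch

conv = torch.nn.Conv2d(4, 3, kernel_size=3, padding=1).half()        # weights on CPU -> torch.HalfTensor
latent = torch.randn(1, 4, 64, 64, device="cuda", dtype=torch.half)  # input on GPU -> torch.cuda.HalfTensor

try:
    conv(latent)
except RuntimeError as e:
    print(e)  # RuntimeError matching the input/weight type mismatch in the traceback

conv.to("cuda")            # once module and input share a device, the call succeeds
out = conv(latent)
print(out.shape)           # torch.Size([1, 3, 64, 64])
```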
|
modules/hypernetworks/hypernetwork.py
<|code_start|>import csv
import datetime
import glob
import html
import os
import sys
import traceback
import inspect
import modules.textual_inversion.dataset
import torch
import tqdm
from einops import rearrange, repeat
from ldm.util import default
from modules import devices, processing, sd_models, shared, sd_samplers
from modules.textual_inversion import textual_inversion
from modules.textual_inversion.learn_schedule import LearnRateScheduler
from torch import einsum
from torch.nn.init import normal_, xavier_normal_, xavier_uniform_, kaiming_normal_, kaiming_uniform_, zeros_
from collections import defaultdict, deque
from statistics import stdev, mean
optimizer_dict = {optim_name : cls_obj for optim_name, cls_obj in inspect.getmembers(torch.optim, inspect.isclass) if optim_name != "Optimizer"}
class HypernetworkModule(torch.nn.Module):
multiplier = 1.0
activation_dict = {
"linear": torch.nn.Identity,
"relu": torch.nn.ReLU,
"leakyrelu": torch.nn.LeakyReLU,
"elu": torch.nn.ELU,
"swish": torch.nn.Hardswish,
"tanh": torch.nn.Tanh,
"sigmoid": torch.nn.Sigmoid,
}
activation_dict.update({cls_name.lower(): cls_obj for cls_name, cls_obj in inspect.getmembers(torch.nn.modules.activation) if inspect.isclass(cls_obj) and cls_obj.__module__ == 'torch.nn.modules.activation'})
def __init__(self, dim, state_dict=None, layer_structure=None, activation_func=None, weight_init='Normal',
add_layer_norm=False, use_dropout=False, activate_output=False, last_layer_dropout=False):
super().__init__()
assert layer_structure is not None, "layer_structure must not be None"
assert layer_structure[0] == 1, "Multiplier Sequence should start with size 1!"
assert layer_structure[-1] == 1, "Multiplier Sequence should end with size 1!"
linears = []
for i in range(len(layer_structure) - 1):
# Add a fully-connected layer
linears.append(torch.nn.Linear(int(dim * layer_structure[i]), int(dim * layer_structure[i+1])))
# Add an activation func except last layer
if activation_func == "linear" or activation_func is None or (i >= len(layer_structure) - 2 and not activate_output):
pass
elif activation_func in self.activation_dict:
linears.append(self.activation_dict[activation_func]())
else:
raise RuntimeError(f'hypernetwork uses an unsupported activation function: {activation_func}')
# Add layer normalization
if add_layer_norm:
linears.append(torch.nn.LayerNorm(int(dim * layer_structure[i+1])))
# Add dropout except last layer
if use_dropout and (i < len(layer_structure) - 3 or last_layer_dropout and i < len(layer_structure) - 2):
linears.append(torch.nn.Dropout(p=0.3))
self.linear = torch.nn.Sequential(*linears)
if state_dict is not None:
self.fix_old_state_dict(state_dict)
self.load_state_dict(state_dict)
else:
for layer in self.linear:
if type(layer) == torch.nn.Linear or type(layer) == torch.nn.LayerNorm:
w, b = layer.weight.data, layer.bias.data
if weight_init == "Normal" or type(layer) == torch.nn.LayerNorm:
normal_(w, mean=0.0, std=0.01)
normal_(b, mean=0.0, std=0)
elif weight_init == 'XavierUniform':
xavier_uniform_(w)
zeros_(b)
elif weight_init == 'XavierNormal':
xavier_normal_(w)
zeros_(b)
elif weight_init == 'KaimingUniform':
kaiming_uniform_(w, nonlinearity='leaky_relu' if 'leakyrelu' == activation_func else 'relu')
zeros_(b)
elif weight_init == 'KaimingNormal':
kaiming_normal_(w, nonlinearity='leaky_relu' if 'leakyrelu' == activation_func else 'relu')
zeros_(b)
else:
raise KeyError(f"Key {weight_init} is not defined as initialization!")
self.to(devices.device)
def fix_old_state_dict(self, state_dict):
changes = {
'linear1.bias': 'linear.0.bias',
'linear1.weight': 'linear.0.weight',
'linear2.bias': 'linear.1.bias',
'linear2.weight': 'linear.1.weight',
}
for fr, to in changes.items():
x = state_dict.get(fr, None)
if x is None:
continue
del state_dict[fr]
state_dict[to] = x
def forward(self, x):
return x + self.linear(x) * self.multiplier
def trainables(self):
layer_structure = []
for layer in self.linear:
if type(layer) == torch.nn.Linear or type(layer) == torch.nn.LayerNorm:
layer_structure += [layer.weight, layer.bias]
return layer_structure
def apply_strength(value=None):
HypernetworkModule.multiplier = value if value is not None else shared.opts.sd_hypernetwork_strength
class Hypernetwork:
filename = None
name = None
def __init__(self, name=None, enable_sizes=None, layer_structure=None, activation_func=None, weight_init=None, add_layer_norm=False, use_dropout=False, activate_output=False, **kwargs):
self.filename = None
self.name = name
self.layers = {}
self.step = 0
self.sd_checkpoint = None
self.sd_checkpoint_name = None
self.layer_structure = layer_structure
self.activation_func = activation_func
self.weight_init = weight_init
self.add_layer_norm = add_layer_norm
self.use_dropout = use_dropout
self.activate_output = activate_output
self.last_layer_dropout = kwargs['last_layer_dropout'] if 'last_layer_dropout' in kwargs else True
self.optimizer_name = None
self.optimizer_state_dict = None
for size in enable_sizes or []:
self.layers[size] = (
HypernetworkModule(size, None, self.layer_structure, self.activation_func, self.weight_init,
self.add_layer_norm, self.use_dropout, self.activate_output, last_layer_dropout=self.last_layer_dropout),
HypernetworkModule(size, None, self.layer_structure, self.activation_func, self.weight_init,
self.add_layer_norm, self.use_dropout, self.activate_output, last_layer_dropout=self.last_layer_dropout),
)
self.eval_mode()
def weights(self):
res = []
for k, layers in self.layers.items():
for layer in layers:
res += layer.parameters()
return res
def train_mode(self):
for k, layers in self.layers.items():
for layer in layers:
layer.train()
for param in layer.parameters():
param.requires_grad = True
def eval_mode(self):
for k, layers in self.layers.items():
for layer in layers:
layer.eval()
for param in layer.parameters():
param.requires_grad = False
def save(self, filename):
state_dict = {}
optimizer_saved_dict = {}
for k, v in self.layers.items():
state_dict[k] = (v[0].state_dict(), v[1].state_dict())
state_dict['step'] = self.step
state_dict['name'] = self.name
state_dict['layer_structure'] = self.layer_structure
state_dict['activation_func'] = self.activation_func
state_dict['is_layer_norm'] = self.add_layer_norm
state_dict['weight_initialization'] = self.weight_init
state_dict['use_dropout'] = self.use_dropout
state_dict['sd_checkpoint'] = self.sd_checkpoint
state_dict['sd_checkpoint_name'] = self.sd_checkpoint_name
state_dict['activate_output'] = self.activate_output
state_dict['last_layer_dropout'] = self.last_layer_dropout
if self.optimizer_name is not None:
optimizer_saved_dict['optimizer_name'] = self.optimizer_name
torch.save(state_dict, filename)
if shared.opts.save_optimizer_state and self.optimizer_state_dict:
optimizer_saved_dict['hash'] = sd_models.model_hash(filename)
optimizer_saved_dict['optimizer_state_dict'] = self.optimizer_state_dict
torch.save(optimizer_saved_dict, filename + '.optim')
def load(self, filename):
self.filename = filename
if self.name is None:
self.name = os.path.splitext(os.path.basename(filename))[0]
state_dict = torch.load(filename, map_location='cpu')
self.layer_structure = state_dict.get('layer_structure', [1, 2, 1])
print(self.layer_structure)
self.activation_func = state_dict.get('activation_func', None)
print(f"Activation function is {self.activation_func}")
self.weight_init = state_dict.get('weight_initialization', 'Normal')
print(f"Weight initialization is {self.weight_init}")
self.add_layer_norm = state_dict.get('is_layer_norm', False)
print(f"Layer norm is set to {self.add_layer_norm}")
self.use_dropout = state_dict.get('use_dropout', False)
print(f"Dropout usage is set to {self.use_dropout}" )
self.activate_output = state_dict.get('activate_output', True)
print(f"Activate last layer is set to {self.activate_output}")
self.last_layer_dropout = state_dict.get('last_layer_dropout', False)
optimizer_saved_dict = torch.load(self.filename + '.optim', map_location = 'cpu') if os.path.exists(self.filename + '.optim') else {}
self.optimizer_name = optimizer_saved_dict.get('optimizer_name', 'AdamW')
print(f"Optimizer name is {self.optimizer_name}")
if sd_models.model_hash(filename) == optimizer_saved_dict.get('hash', None):
self.optimizer_state_dict = optimizer_saved_dict.get('optimizer_state_dict', None)
else:
self.optimizer_state_dict = None
if self.optimizer_state_dict:
print("Loaded existing optimizer from checkpoint")
else:
print("No saved optimizer exists in checkpoint")
for size, sd in state_dict.items():
if type(size) == int:
self.layers[size] = (
HypernetworkModule(size, sd[0], self.layer_structure, self.activation_func, self.weight_init,
self.add_layer_norm, self.use_dropout, self.activate_output, last_layer_dropout=self.last_layer_dropout),
HypernetworkModule(size, sd[1], self.layer_structure, self.activation_func, self.weight_init,
self.add_layer_norm, self.use_dropout, self.activate_output, last_layer_dropout=self.last_layer_dropout),
)
self.name = state_dict.get('name', self.name)
self.step = state_dict.get('step', 0)
self.sd_checkpoint = state_dict.get('sd_checkpoint', None)
self.sd_checkpoint_name = state_dict.get('sd_checkpoint_name', None)
def list_hypernetworks(path):
res = {}
for filename in sorted(glob.iglob(os.path.join(path, '**/*.pt'), recursive=True)):
name = os.path.splitext(os.path.basename(filename))[0]
# Prevent a hypothetical "None.pt" from being listed.
if name != "None":
res[name + f"({sd_models.model_hash(filename)})"] = filename
return res
def load_hypernetwork(filename):
path = shared.hypernetworks.get(filename, None)
# Prevent any file named "None.pt" from being loaded.
if path is not None and filename != "None":
print(f"Loading hypernetwork {filename}")
try:
shared.loaded_hypernetwork = Hypernetwork()
shared.loaded_hypernetwork.load(path)
except Exception:
print(f"Error loading hypernetwork {path}", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
else:
if shared.loaded_hypernetwork is not None:
print(f"Unloading hypernetwork")
shared.loaded_hypernetwork = None
def find_closest_hypernetwork_name(search: str):
if not search:
return None
search = search.lower()
applicable = [name for name in shared.hypernetworks if search in name.lower()]
if not applicable:
return None
applicable = sorted(applicable, key=lambda name: len(name))
return applicable[0]
def apply_hypernetwork(hypernetwork, context, layer=None):
hypernetwork_layers = (hypernetwork.layers if hypernetwork is not None else {}).get(context.shape[2], None)
if hypernetwork_layers is None:
return context, context
if layer is not None:
layer.hyper_k = hypernetwork_layers[0]
layer.hyper_v = hypernetwork_layers[1]
context_k = hypernetwork_layers[0](context)
context_v = hypernetwork_layers[1](context)
return context_k, context_v
def attention_CrossAttention_forward(self, x, context=None, mask=None):
h = self.heads
q = self.to_q(x)
context = default(context, x)
context_k, context_v = apply_hypernetwork(shared.loaded_hypernetwork, context, self)
k = self.to_k(context_k)
v = self.to_v(context_v)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))
sim = einsum('b i d, b j d -> b i j', q, k) * self.scale
if mask is not None:
mask = rearrange(mask, 'b ... -> b (...)')
max_neg_value = -torch.finfo(sim.dtype).max
mask = repeat(mask, 'b j -> (b h) () j', h=h)
sim.masked_fill_(~mask, max_neg_value)
# attention, what we cannot get enough of
attn = sim.softmax(dim=-1)
out = einsum('b i j, b j d -> b i d', attn, v)
out = rearrange(out, '(b h) n d -> b n (h d)', h=h)
return self.to_out(out)
def stack_conds(conds):
if len(conds) == 1:
return torch.stack(conds)
# same as in reconstruct_multicond_batch
token_count = max([x.shape[0] for x in conds])
for i in range(len(conds)):
if conds[i].shape[0] != token_count:
last_vector = conds[i][-1:]
last_vector_repeated = last_vector.repeat([token_count - conds[i].shape[0], 1])
conds[i] = torch.vstack([conds[i], last_vector_repeated])
return torch.stack(conds)
def statistics(data):
if len(data) < 2:
std = 0
else:
std = stdev(data)
total_information = f"loss:{mean(data):.3f}" + u"\u00B1" + f"({std/ (len(data) ** 0.5):.3f})"
recent_data = data[-32:]
if len(recent_data) < 2:
std = 0
else:
std = stdev(recent_data)
recent_information = f"recent 32 loss:{mean(recent_data):.3f}" + u"\u00B1" + f"({std / (len(recent_data) ** 0.5):.3f})"
return total_information, recent_information
def report_statistics(loss_info:dict):
keys = sorted(loss_info.keys(), key=lambda x: sum(loss_info[x]) / len(loss_info[x]))
for key in keys:
try:
print("Loss statistics for file " + key)
info, recent = statistics(list(loss_info[key]))
print(info)
print(recent)
except Exception as e:
print(e)
def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, gradient_step, data_root, log_directory, training_width, training_height, steps, shuffle_tags, tag_drop_out, latent_sampling_method, create_image_every, save_hypernetwork_every, template_file, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height):
# images allows training previews to have infotext. Importing it at the top causes a circular import problem.
from modules import images
save_hypernetwork_every = save_hypernetwork_every or 0
create_image_every = create_image_every or 0
textual_inversion.validate_train_inputs(hypernetwork_name, learn_rate, batch_size, gradient_step, data_root, template_file, steps, save_hypernetwork_every, create_image_every, log_directory, name="hypernetwork")
path = shared.hypernetworks.get(hypernetwork_name, None)
shared.loaded_hypernetwork = Hypernetwork()
shared.loaded_hypernetwork.load(path)
shared.state.textinfo = "Initializing hypernetwork training..."
shared.state.job_count = steps
hypernetwork_name = hypernetwork_name.rsplit('(', 1)[0]
filename = os.path.join(shared.cmd_opts.hypernetwork_dir, f'{hypernetwork_name}.pt')
log_directory = os.path.join(log_directory, datetime.datetime.now().strftime("%Y-%m-%d"), hypernetwork_name)
unload = shared.opts.unload_models_when_training
if save_hypernetwork_every > 0:
hypernetwork_dir = os.path.join(log_directory, "hypernetworks")
os.makedirs(hypernetwork_dir, exist_ok=True)
else:
hypernetwork_dir = None
if create_image_every > 0:
images_dir = os.path.join(log_directory, "images")
os.makedirs(images_dir, exist_ok=True)
else:
images_dir = None
hypernetwork = shared.loaded_hypernetwork
checkpoint = sd_models.select_checkpoint()
initial_step = hypernetwork.step or 0
if initial_step >= steps:
shared.state.textinfo = f"Model has already been trained beyond specified max steps"
return hypernetwork, filename
scheduler = LearnRateScheduler(learn_rate, steps, initial_step)
# dataset loading may take a while, so input validations and early returns should be done before this
shared.state.textinfo = f"Preparing dataset from {html.escape(data_root)}..."
pin_memory = shared.opts.pin_memory
ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=hypernetwork_name, model=shared.sd_model, cond_model=shared.sd_model.cond_stage_model, device=devices.device, template_file=template_file, include_cond=True, batch_size=batch_size, gradient_step=gradient_step, shuffle_tags=shuffle_tags, tag_drop_out=tag_drop_out, latent_sampling_method=latent_sampling_method)
latent_sampling_method = ds.latent_sampling_method
dl = modules.textual_inversion.dataset.PersonalizedDataLoader(ds, latent_sampling_method=latent_sampling_method, batch_size=ds.batch_size, pin_memory=pin_memory)
if unload:
shared.sd_model.cond_stage_model.to(devices.cpu)
shared.sd_model.first_stage_model.to(devices.cpu)
weights = hypernetwork.weights()
hypernetwork.train_mode()
# Here we use optimizer from saved HN, or we can specify as UI option.
if hypernetwork.optimizer_name in optimizer_dict:
optimizer = optimizer_dict[hypernetwork.optimizer_name](params=weights, lr=scheduler.learn_rate)
optimizer_name = hypernetwork.optimizer_name
else:
print(f"Optimizer type {hypernetwork.optimizer_name} is not defined!")
optimizer = torch.optim.AdamW(params=weights, lr=scheduler.learn_rate)
optimizer_name = 'AdamW'
if hypernetwork.optimizer_state_dict: # This line must be changed if Optimizer type can be different from saved optimizer.
try:
optimizer.load_state_dict(hypernetwork.optimizer_state_dict)
except RuntimeError as e:
print("Cannot resume from saved optimizer!")
print(e)
scaler = torch.cuda.amp.GradScaler()
batch_size = ds.batch_size
gradient_step = ds.gradient_step
# n steps = batch_size * gradient_step * n image processed
steps_per_epoch = len(ds) // batch_size // gradient_step
max_steps_per_epoch = len(ds) // batch_size - (len(ds) // batch_size) % gradient_step
loss_step = 0
_loss_step = 0 #internal
# size = len(ds.indexes)
# loss_dict = defaultdict(lambda : deque(maxlen = 1024))
# losses = torch.zeros((size,))
# previous_mean_losses = [0]
# previous_mean_loss = 0
# print("Mean loss of {} elements".format(size))
steps_without_grad = 0
last_saved_file = "<none>"
last_saved_image = "<none>"
forced_filename = "<none>"
pbar = tqdm.tqdm(total=steps - initial_step)
try:
for i in range((steps-initial_step) * gradient_step):
if scheduler.finished:
break
if shared.state.interrupted:
break
for j, batch in enumerate(dl):
# works as a drop_last=True for gradient accumulation
if j == max_steps_per_epoch:
break
scheduler.apply(optimizer, hypernetwork.step)
if scheduler.finished:
break
if shared.state.interrupted:
break
with devices.autocast():
x = batch.latent_sample.to(devices.device, non_blocking=pin_memory)
if tag_drop_out != 0 or shuffle_tags:
shared.sd_model.cond_stage_model.to(devices.device)
c = shared.sd_model.cond_stage_model(batch.cond_text).to(devices.device, non_blocking=pin_memory)
shared.sd_model.cond_stage_model.to(devices.cpu)
else:
c = stack_conds(batch.cond).to(devices.device, non_blocking=pin_memory)
loss = shared.sd_model(x, c)[0] / gradient_step
del x
del c
_loss_step += loss.item()
scaler.scale(loss).backward()
# go back until we reach gradient accumulation steps
if (j + 1) % gradient_step != 0:
continue
# print(f"grad:{weights[0].grad.detach().cpu().abs().mean().item():.7f}")
# scaler.unscale_(optimizer)
# print(f"grad:{weights[0].grad.detach().cpu().abs().mean().item():.15f}")
# torch.nn.utils.clip_grad_norm_(weights, max_norm=1.0)
# print(f"grad:{weights[0].grad.detach().cpu().abs().mean().item():.15f}")
scaler.step(optimizer)
scaler.update()
hypernetwork.step += 1
pbar.update()
optimizer.zero_grad(set_to_none=True)
loss_step = _loss_step
_loss_step = 0
steps_done = hypernetwork.step + 1
epoch_num = hypernetwork.step // steps_per_epoch
epoch_step = hypernetwork.step % steps_per_epoch
pbar.set_description(f"[Epoch {epoch_num}: {epoch_step+1}/{steps_per_epoch}]loss: {loss_step:.7f}")
if hypernetwork_dir is not None and steps_done % save_hypernetwork_every == 0:
# Before saving, change name to match current checkpoint.
hypernetwork_name_every = f'{hypernetwork_name}-{steps_done}'
last_saved_file = os.path.join(hypernetwork_dir, f'{hypernetwork_name_every}.pt')
hypernetwork.optimizer_name = optimizer_name
if shared.opts.save_optimizer_state:
hypernetwork.optimizer_state_dict = optimizer.state_dict()
save_hypernetwork(hypernetwork, checkpoint, hypernetwork_name, last_saved_file)
hypernetwork.optimizer_state_dict = None # dereference it after saving, to save memory.
textual_inversion.write_loss(log_directory, "hypernetwork_loss.csv", hypernetwork.step, steps_per_epoch, {
"loss": f"{loss_step:.7f}",
"learn_rate": scheduler.learn_rate
})
if images_dir is not None and steps_done % create_image_every == 0:
forced_filename = f'{hypernetwork_name}-{steps_done}'
last_saved_image = os.path.join(images_dir, forced_filename)
hypernetwork.eval_mode()
shared.sd_model.cond_stage_model.to(devices.device)
shared.sd_model.first_stage_model.to(devices.device)
p = processing.StableDiffusionProcessingTxt2Img(
sd_model=shared.sd_model,
do_not_save_grid=True,
do_not_save_samples=True,
)
if preview_from_txt2img:
p.prompt = preview_prompt
p.negative_prompt = preview_negative_prompt
p.steps = preview_steps
p.sampler_name = sd_samplers.samplers[preview_sampler_index].name
p.cfg_scale = preview_cfg_scale
p.seed = preview_seed
p.width = preview_width
p.height = preview_height
else:
p.prompt = batch.cond_text[0]
p.steps = 20
p.width = training_width
p.height = training_height
preview_text = p.prompt
processed = processing.process_images(p)
image = processed.images[0] if len(processed.images) > 0 else None
if unload:
shared.sd_model.cond_stage_model.to(devices.cpu)
shared.sd_model.first_stage_model.to(devices.cpu)
hypernetwork.train_mode()
if image is not None:
shared.state.current_image = image
last_saved_image, last_text_info = images.save_image(image, images_dir, "", p.seed, p.prompt, shared.opts.samples_format, processed.infotexts[0], p=p, forced_filename=forced_filename, save_to_dirs=False)
last_saved_image += f", prompt: {preview_text}"
shared.state.job_no = hypernetwork.step
shared.state.textinfo = f"""
<p>
Loss: {loss_step:.7f}<br/>
Step: {steps_done}<br/>
Last prompt: {html.escape(batch.cond_text[0])}<br/>
Last saved hypernetwork: {html.escape(last_saved_file)}<br/>
Last saved image: {html.escape(last_saved_image)}<br/>
</p>
"""
except Exception:
print(traceback.format_exc(), file=sys.stderr)
finally:
pbar.leave = False
pbar.close()
hypernetwork.eval_mode()
#report_statistics(loss_dict)
filename = os.path.join(shared.cmd_opts.hypernetwork_dir, f'{hypernetwork_name}.pt')
hypernetwork.optimizer_name = optimizer_name
if shared.opts.save_optimizer_state:
hypernetwork.optimizer_state_dict = optimizer.state_dict()
save_hypernetwork(hypernetwork, checkpoint, hypernetwork_name, filename)
del optimizer
hypernetwork.optimizer_state_dict = None # dereference it after saving, to save memory.
shared.sd_model.cond_stage_model.to(devices.device)
shared.sd_model.first_stage_model.to(devices.device)
return hypernetwork, filename
def save_hypernetwork(hypernetwork, checkpoint, hypernetwork_name, filename):
old_hypernetwork_name = hypernetwork.name
old_sd_checkpoint = hypernetwork.sd_checkpoint if hasattr(hypernetwork, "sd_checkpoint") else None
old_sd_checkpoint_name = hypernetwork.sd_checkpoint_name if hasattr(hypernetwork, "sd_checkpoint_name") else None
try:
hypernetwork.sd_checkpoint = checkpoint.hash
hypernetwork.sd_checkpoint_name = checkpoint.model_name
hypernetwork.name = hypernetwork_name
hypernetwork.save(filename)
except:
hypernetwork.sd_checkpoint = old_sd_checkpoint
hypernetwork.sd_checkpoint_name = old_sd_checkpoint_name
hypernetwork.name = old_hypernetwork_name
raise
<|code_end|>
modules/textual_inversion/textual_inversion.py
<|code_start|>import os
import sys
import traceback
import torch
import tqdm
import html
import datetime
import csv
from PIL import Image, PngImagePlugin
from modules import shared, devices, sd_hijack, processing, sd_models, images, sd_samplers
import modules.textual_inversion.dataset
from modules.textual_inversion.learn_schedule import LearnRateScheduler
from modules.textual_inversion.image_embedding import (embedding_to_b64, embedding_from_b64,
insert_image_data_embed, extract_image_data_embed,
caption_image_overlay)
class Embedding:
def __init__(self, vec, name, step=None):
self.vec = vec
self.name = name
self.step = step
self.cached_checksum = None
self.sd_checkpoint = None
self.sd_checkpoint_name = None
def save(self, filename):
embedding_data = {
"string_to_token": {"*": 265},
"string_to_param": {"*": self.vec},
"name": self.name,
"step": self.step,
"sd_checkpoint": self.sd_checkpoint,
"sd_checkpoint_name": self.sd_checkpoint_name,
}
torch.save(embedding_data, filename)
def checksum(self):
if self.cached_checksum is not None:
return self.cached_checksum
def const_hash(a):
r = 0
for v in a:
r = (r * 281 ^ int(v) * 997) & 0xFFFFFFFF
return r
self.cached_checksum = f'{const_hash(self.vec.reshape(-1) * 100) & 0xffff:04x}'
return self.cached_checksum
class EmbeddingDatabase:
def __init__(self, embeddings_dir):
self.ids_lookup = {}
self.word_embeddings = {}
self.dir_mtime = None
self.embeddings_dir = embeddings_dir
def register_embedding(self, embedding, model):
self.word_embeddings[embedding.name] = embedding
# TODO changing between clip and open clip changes tokenization, which will cause embeddings to stop working
ids = model.cond_stage_model.tokenize([embedding.name])[0]
first_id = ids[0]
if first_id not in self.ids_lookup:
self.ids_lookup[first_id] = []
self.ids_lookup[first_id] = sorted(self.ids_lookup[first_id] + [(ids, embedding)], key=lambda x: len(x[0]), reverse=True)
return embedding
def load_textual_inversion_embeddings(self):
mt = os.path.getmtime(self.embeddings_dir)
if self.dir_mtime is not None and mt <= self.dir_mtime:
return
self.dir_mtime = mt
self.ids_lookup.clear()
self.word_embeddings.clear()
def process_file(path, filename):
name = os.path.splitext(filename)[0]
data = []
if os.path.splitext(filename.upper())[-1] in ['.PNG', '.WEBP', '.JXL', '.AVIF']:
embed_image = Image.open(path)
if hasattr(embed_image, 'text') and 'sd-ti-embedding' in embed_image.text:
data = embedding_from_b64(embed_image.text['sd-ti-embedding'])
name = data.get('name', name)
else:
data = extract_image_data_embed(embed_image)
name = data.get('name', name)
else:
data = torch.load(path, map_location="cpu")
# textual inversion embeddings
if 'string_to_param' in data:
param_dict = data['string_to_param']
if hasattr(param_dict, '_parameters'):
param_dict = getattr(param_dict, '_parameters') # fix for torch 1.12.1 loading saved file from torch 1.11
assert len(param_dict) == 1, 'embedding file has multiple terms in it'
emb = next(iter(param_dict.items()))[1]
# diffuser concepts
elif type(data) == dict and type(next(iter(data.values()))) == torch.Tensor:
assert len(data.keys()) == 1, 'embedding file has multiple terms in it'
emb = next(iter(data.values()))
if len(emb.shape) == 1:
emb = emb.unsqueeze(0)
else:
raise Exception(f"Couldn't identify {filename} as neither textual inversion embedding nor diffuser concept.")
vec = emb.detach().to(devices.device, dtype=torch.float32)
embedding = Embedding(vec, name)
embedding.step = data.get('step', None)
embedding.sd_checkpoint = data.get('sd_checkpoint', None)
embedding.sd_checkpoint_name = data.get('sd_checkpoint_name', None)
self.register_embedding(embedding, shared.sd_model)
for fn in os.listdir(self.embeddings_dir):
try:
fullfn = os.path.join(self.embeddings_dir, fn)
if os.stat(fullfn).st_size == 0:
continue
process_file(fullfn, fn)
except Exception:
print(f"Error loading emedding {fn}:", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
continue
print(f"Loaded a total of {len(self.word_embeddings)} textual inversion embeddings.")
print("Embeddings:", ', '.join(self.word_embeddings.keys()))
def find_embedding_at_position(self, tokens, offset):
token = tokens[offset]
possible_matches = self.ids_lookup.get(token, None)
if possible_matches is None:
return None, None
for ids, embedding in possible_matches:
if tokens[offset:offset + len(ids)] == ids:
return embedding, len(ids)
return None, None
def create_embedding(name, num_vectors_per_token, overwrite_old, init_text='*'):
cond_model = shared.sd_model.cond_stage_model
with devices.autocast():
cond_model([""]) # will send cond model to GPU if lowvram/medvram is active
embedded = cond_model.encode_embedding_init_text(init_text, num_vectors_per_token)
vec = torch.zeros((num_vectors_per_token, embedded.shape[1]), device=devices.device)
for i in range(num_vectors_per_token):
vec[i] = embedded[i * int(embedded.shape[0]) // num_vectors_per_token]
# Remove illegal characters from name.
name = "".join( x for x in name if (x.isalnum() or x in "._- "))
fn = os.path.join(shared.cmd_opts.embeddings_dir, f"{name}.pt")
if not overwrite_old:
assert not os.path.exists(fn), f"file {fn} already exists"
embedding = Embedding(vec, name)
embedding.step = 0
embedding.save(fn)
return fn
def write_loss(log_directory, filename, step, epoch_len, values):
if shared.opts.training_write_csv_every == 0:
return
if step % shared.opts.training_write_csv_every != 0:
return
    write_csv_header = not os.path.exists(os.path.join(log_directory, filename))
with open(os.path.join(log_directory, filename), "a+", newline='') as fout:
csv_writer = csv.DictWriter(fout, fieldnames=["step", "epoch", "epoch_step", *(values.keys())])
if write_csv_header:
csv_writer.writeheader()
epoch = (step - 1) // epoch_len
epoch_step = (step - 1) % epoch_len
csv_writer.writerow({
"step": step,
"epoch": epoch,
"epoch_step": epoch_step,
**values,
})
def validate_train_inputs(model_name, learn_rate, batch_size, gradient_step, data_root, template_file, steps, save_model_every, create_image_every, log_directory, name="embedding"):
assert model_name, f"{name} not selected"
assert learn_rate, "Learning rate is empty or 0"
assert isinstance(batch_size, int), "Batch size must be integer"
assert batch_size > 0, "Batch size must be positive"
assert isinstance(gradient_step, int), "Gradient accumulation step must be integer"
assert gradient_step > 0, "Gradient accumulation step must be positive"
assert data_root, "Dataset directory is empty"
assert os.path.isdir(data_root), "Dataset directory doesn't exist"
assert os.listdir(data_root), "Dataset directory is empty"
assert template_file, "Prompt template file is empty"
assert os.path.isfile(template_file), "Prompt template file doesn't exist"
assert steps, "Max steps is empty or 0"
assert isinstance(steps, int), "Max steps must be integer"
    assert steps > 0, "Max steps must be positive"
    assert isinstance(save_model_every, int), f"Save {name} must be integer"
    assert save_model_every >= 0, f"Save {name} must be positive or 0"
    assert isinstance(create_image_every, int), "Create image must be integer"
    assert create_image_every >= 0, "Create image must be positive or 0"
if save_model_every or create_image_every:
assert log_directory, "Log directory is empty"
def train_embedding(embedding_name, learn_rate, batch_size, gradient_step, data_root, log_directory, training_width, training_height, steps, shuffle_tags, tag_drop_out, latent_sampling_method, create_image_every, save_embedding_every, template_file, save_image_with_stored_embedding, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height):
save_embedding_every = save_embedding_every or 0
create_image_every = create_image_every or 0
validate_train_inputs(embedding_name, learn_rate, batch_size, gradient_step, data_root, template_file, steps, save_embedding_every, create_image_every, log_directory, name="embedding")
shared.state.textinfo = "Initializing textual inversion training..."
shared.state.job_count = steps
filename = os.path.join(shared.cmd_opts.embeddings_dir, f'{embedding_name}.pt')
log_directory = os.path.join(log_directory, datetime.datetime.now().strftime("%Y-%m-%d"), embedding_name)
unload = shared.opts.unload_models_when_training
if save_embedding_every > 0:
embedding_dir = os.path.join(log_directory, "embeddings")
os.makedirs(embedding_dir, exist_ok=True)
else:
embedding_dir = None
if create_image_every > 0:
images_dir = os.path.join(log_directory, "images")
os.makedirs(images_dir, exist_ok=True)
else:
images_dir = None
if create_image_every > 0 and save_image_with_stored_embedding:
images_embeds_dir = os.path.join(log_directory, "image_embeddings")
os.makedirs(images_embeds_dir, exist_ok=True)
else:
images_embeds_dir = None
hijack = sd_hijack.model_hijack
embedding = hijack.embedding_db.word_embeddings[embedding_name]
checkpoint = sd_models.select_checkpoint()
initial_step = embedding.step or 0
if initial_step >= steps:
shared.state.textinfo = f"Model has already been trained beyond specified max steps"
return embedding, filename
scheduler = LearnRateScheduler(learn_rate, steps, initial_step)
# dataset loading may take a while, so input validations and early returns should be done before this
shared.state.textinfo = f"Preparing dataset from {html.escape(data_root)}..."
pin_memory = shared.opts.pin_memory
ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=embedding_name, model=shared.sd_model, cond_model=shared.sd_model.cond_stage_model, device=devices.device, template_file=template_file, batch_size=batch_size, gradient_step=gradient_step, shuffle_tags=shuffle_tags, tag_drop_out=tag_drop_out, latent_sampling_method=latent_sampling_method)
latent_sampling_method = ds.latent_sampling_method
dl = modules.textual_inversion.dataset.PersonalizedDataLoader(ds, latent_sampling_method=latent_sampling_method, batch_size=ds.batch_size, pin_memory=pin_memory)
if unload:
shared.sd_model.first_stage_model.to(devices.cpu)
embedding.vec.requires_grad = True
optimizer = torch.optim.AdamW([embedding.vec], lr=scheduler.learn_rate, weight_decay=0.0)
scaler = torch.cuda.amp.GradScaler()
batch_size = ds.batch_size
gradient_step = ds.gradient_step
# n steps = batch_size * gradient_step * n image processed
steps_per_epoch = len(ds) // batch_size // gradient_step
max_steps_per_epoch = len(ds) // batch_size - (len(ds) // batch_size) % gradient_step
loss_step = 0
_loss_step = 0 #internal
last_saved_file = "<none>"
last_saved_image = "<none>"
forced_filename = "<none>"
embedding_yet_to_be_embedded = False
pbar = tqdm.tqdm(total=steps - initial_step)
try:
for i in range((steps-initial_step) * gradient_step):
if scheduler.finished:
break
if shared.state.interrupted:
break
for j, batch in enumerate(dl):
# works as a drop_last=True for gradient accumulation
if j == max_steps_per_epoch:
break
scheduler.apply(optimizer, embedding.step)
if scheduler.finished:
break
if shared.state.interrupted:
break
with devices.autocast():
# c = stack_conds(batch.cond).to(devices.device)
# mask = torch.tensor(batch.emb_index).to(devices.device, non_blocking=pin_memory)
# print(mask)
# c[:, 1:1+embedding.vec.shape[0]] = embedding.vec.to(devices.device, non_blocking=pin_memory)
x = batch.latent_sample.to(devices.device, non_blocking=pin_memory)
c = shared.sd_model.cond_stage_model(batch.cond_text)
loss = shared.sd_model(x, c)[0] / gradient_step
del x
_loss_step += loss.item()
scaler.scale(loss).backward()
# go back until we reach gradient accumulation steps
if (j + 1) % gradient_step != 0:
continue
scaler.step(optimizer)
scaler.update()
embedding.step += 1
pbar.update()
optimizer.zero_grad(set_to_none=True)
loss_step = _loss_step
_loss_step = 0
steps_done = embedding.step + 1
epoch_num = embedding.step // steps_per_epoch
epoch_step = embedding.step % steps_per_epoch
pbar.set_description(f"[Epoch {epoch_num}: {epoch_step+1}/{steps_per_epoch}]loss: {loss_step:.7f}")
if embedding_dir is not None and steps_done % save_embedding_every == 0:
# Before saving, change name to match current checkpoint.
embedding_name_every = f'{embedding_name}-{steps_done}'
last_saved_file = os.path.join(embedding_dir, f'{embedding_name_every}.pt')
#if shared.opts.save_optimizer_state:
#embedding.optimizer_state_dict = optimizer.state_dict()
save_embedding(embedding, checkpoint, embedding_name_every, last_saved_file, remove_cached_checksum=True)
embedding_yet_to_be_embedded = True
write_loss(log_directory, "textual_inversion_loss.csv", embedding.step, steps_per_epoch, {
"loss": f"{loss_step:.7f}",
"learn_rate": scheduler.learn_rate
})
if images_dir is not None and steps_done % create_image_every == 0:
forced_filename = f'{embedding_name}-{steps_done}'
last_saved_image = os.path.join(images_dir, forced_filename)
shared.sd_model.first_stage_model.to(devices.device)
p = processing.StableDiffusionProcessingTxt2Img(
sd_model=shared.sd_model,
do_not_save_grid=True,
do_not_save_samples=True,
do_not_reload_embeddings=True,
)
if preview_from_txt2img:
p.prompt = preview_prompt
p.negative_prompt = preview_negative_prompt
p.steps = preview_steps
p.sampler_name = sd_samplers.samplers[preview_sampler_index].name
p.cfg_scale = preview_cfg_scale
p.seed = preview_seed
p.width = preview_width
p.height = preview_height
else:
p.prompt = batch.cond_text[0]
p.steps = 20
p.width = training_width
p.height = training_height
preview_text = p.prompt
processed = processing.process_images(p)
image = processed.images[0] if len(processed.images) > 0 else None
if unload:
shared.sd_model.first_stage_model.to(devices.cpu)
if image is not None:
shared.state.current_image = image
last_saved_image, last_text_info = images.save_image(image, images_dir, "", p.seed, p.prompt, shared.opts.samples_format, processed.infotexts[0], p=p, forced_filename=forced_filename, save_to_dirs=False)
last_saved_image += f", prompt: {preview_text}"
if save_image_with_stored_embedding and os.path.exists(last_saved_file) and embedding_yet_to_be_embedded:
last_saved_image_chunks = os.path.join(images_embeds_dir, f'{embedding_name}-{steps_done}.png')
info = PngImagePlugin.PngInfo()
data = torch.load(last_saved_file)
info.add_text("sd-ti-embedding", embedding_to_b64(data))
title = "<{}>".format(data.get('name', '???'))
try:
vectorSize = list(data['string_to_param'].values())[0].shape[0]
except Exception as e:
vectorSize = '?'
checkpoint = sd_models.select_checkpoint()
footer_left = checkpoint.model_name
footer_mid = '[{}]'.format(checkpoint.hash)
footer_right = '{}v {}s'.format(vectorSize, steps_done)
captioned_image = caption_image_overlay(image, title, footer_left, footer_mid, footer_right)
captioned_image = insert_image_data_embed(captioned_image, data)
captioned_image.save(last_saved_image_chunks, "PNG", pnginfo=info)
embedding_yet_to_be_embedded = False
last_saved_image, last_text_info = images.save_image(image, images_dir, "", p.seed, p.prompt, shared.opts.samples_format, processed.infotexts[0], p=p, forced_filename=forced_filename, save_to_dirs=False)
last_saved_image += f", prompt: {preview_text}"
shared.state.job_no = embedding.step
shared.state.textinfo = f"""
<p>
Loss: {loss_step:.7f}<br/>
Step: {steps_done}<br/>
Last prompt: {html.escape(batch.cond_text[0])}<br/>
Last saved embedding: {html.escape(last_saved_file)}<br/>
Last saved image: {html.escape(last_saved_image)}<br/>
</p>
"""
filename = os.path.join(shared.cmd_opts.embeddings_dir, f'{embedding_name}.pt')
save_embedding(embedding, checkpoint, embedding_name, filename, remove_cached_checksum=True)
except Exception:
print(traceback.format_exc(), file=sys.stderr)
pass
finally:
pbar.leave = False
pbar.close()
shared.sd_model.first_stage_model.to(devices.device)
return embedding, filename
def save_embedding(embedding, checkpoint, embedding_name, filename, remove_cached_checksum=True):
old_embedding_name = embedding.name
old_sd_checkpoint = embedding.sd_checkpoint if hasattr(embedding, "sd_checkpoint") else None
old_sd_checkpoint_name = embedding.sd_checkpoint_name if hasattr(embedding, "sd_checkpoint_name") else None
old_cached_checksum = embedding.cached_checksum if hasattr(embedding, "cached_checksum") else None
try:
embedding.sd_checkpoint = checkpoint.hash
embedding.sd_checkpoint_name = checkpoint.model_name
if remove_cached_checksum:
embedding.cached_checksum = None
embedding.name = embedding_name
embedding.save(filename)
except:
embedding.sd_checkpoint = old_sd_checkpoint
embedding.sd_checkpoint_name = old_sd_checkpoint_name
embedding.name = old_embedding_name
embedding.cached_checksum = old_cached_checksum
raise
<|code_end|>
|
modules/hypernetworks/hypernetwork.py
<|code_start|>import csv
import datetime
import glob
import html
import os
import sys
import traceback
import inspect
import modules.textual_inversion.dataset
import torch
import tqdm
from einops import rearrange, repeat
from ldm.util import default
from modules import devices, processing, sd_models, shared, sd_samplers
from modules.textual_inversion import textual_inversion
from modules.textual_inversion.learn_schedule import LearnRateScheduler
from torch import einsum
from torch.nn.init import normal_, xavier_normal_, xavier_uniform_, kaiming_normal_, kaiming_uniform_, zeros_
from collections import defaultdict, deque
from statistics import stdev, mean
optimizer_dict = {optim_name : cls_obj for optim_name, cls_obj in inspect.getmembers(torch.optim, inspect.isclass) if optim_name != "Optimizer"}
class HypernetworkModule(torch.nn.Module):
multiplier = 1.0
activation_dict = {
"linear": torch.nn.Identity,
"relu": torch.nn.ReLU,
"leakyrelu": torch.nn.LeakyReLU,
"elu": torch.nn.ELU,
"swish": torch.nn.Hardswish,
"tanh": torch.nn.Tanh,
"sigmoid": torch.nn.Sigmoid,
}
activation_dict.update({cls_name.lower(): cls_obj for cls_name, cls_obj in inspect.getmembers(torch.nn.modules.activation) if inspect.isclass(cls_obj) and cls_obj.__module__ == 'torch.nn.modules.activation'})
def __init__(self, dim, state_dict=None, layer_structure=None, activation_func=None, weight_init='Normal',
add_layer_norm=False, use_dropout=False, activate_output=False, last_layer_dropout=False):
super().__init__()
assert layer_structure is not None, "layer_structure must not be None"
assert layer_structure[0] == 1, "Multiplier Sequence should start with size 1!"
assert layer_structure[-1] == 1, "Multiplier Sequence should end with size 1!"
linears = []
for i in range(len(layer_structure) - 1):
# Add a fully-connected layer
linears.append(torch.nn.Linear(int(dim * layer_structure[i]), int(dim * layer_structure[i+1])))
# Add an activation func except last layer
if activation_func == "linear" or activation_func is None or (i >= len(layer_structure) - 2 and not activate_output):
pass
elif activation_func in self.activation_dict:
linears.append(self.activation_dict[activation_func]())
else:
raise RuntimeError(f'hypernetwork uses an unsupported activation function: {activation_func}')
# Add layer normalization
if add_layer_norm:
linears.append(torch.nn.LayerNorm(int(dim * layer_structure[i+1])))
# Add dropout except last layer
if use_dropout and (i < len(layer_structure) - 3 or last_layer_dropout and i < len(layer_structure) - 2):
linears.append(torch.nn.Dropout(p=0.3))
self.linear = torch.nn.Sequential(*linears)
if state_dict is not None:
self.fix_old_state_dict(state_dict)
self.load_state_dict(state_dict)
else:
for layer in self.linear:
if type(layer) == torch.nn.Linear or type(layer) == torch.nn.LayerNorm:
w, b = layer.weight.data, layer.bias.data
if weight_init == "Normal" or type(layer) == torch.nn.LayerNorm:
normal_(w, mean=0.0, std=0.01)
normal_(b, mean=0.0, std=0)
elif weight_init == 'XavierUniform':
xavier_uniform_(w)
zeros_(b)
elif weight_init == 'XavierNormal':
xavier_normal_(w)
zeros_(b)
elif weight_init == 'KaimingUniform':
kaiming_uniform_(w, nonlinearity='leaky_relu' if 'leakyrelu' == activation_func else 'relu')
zeros_(b)
elif weight_init == 'KaimingNormal':
kaiming_normal_(w, nonlinearity='leaky_relu' if 'leakyrelu' == activation_func else 'relu')
zeros_(b)
else:
raise KeyError(f"Key {weight_init} is not defined as initialization!")
self.to(devices.device)
def fix_old_state_dict(self, state_dict):
changes = {
'linear1.bias': 'linear.0.bias',
'linear1.weight': 'linear.0.weight',
'linear2.bias': 'linear.1.bias',
'linear2.weight': 'linear.1.weight',
}
for fr, to in changes.items():
x = state_dict.get(fr, None)
if x is None:
continue
del state_dict[fr]
state_dict[to] = x
def forward(self, x):
return x + self.linear(x) * self.multiplier
def trainables(self):
layer_structure = []
for layer in self.linear:
if type(layer) == torch.nn.Linear or type(layer) == torch.nn.LayerNorm:
layer_structure += [layer.weight, layer.bias]
return layer_structure
def apply_strength(value=None):
HypernetworkModule.multiplier = value if value is not None else shared.opts.sd_hypernetwork_strength
class Hypernetwork:
filename = None
name = None
def __init__(self, name=None, enable_sizes=None, layer_structure=None, activation_func=None, weight_init=None, add_layer_norm=False, use_dropout=False, activate_output=False, **kwargs):
self.filename = None
self.name = name
self.layers = {}
self.step = 0
self.sd_checkpoint = None
self.sd_checkpoint_name = None
self.layer_structure = layer_structure
self.activation_func = activation_func
self.weight_init = weight_init
self.add_layer_norm = add_layer_norm
self.use_dropout = use_dropout
self.activate_output = activate_output
self.last_layer_dropout = kwargs['last_layer_dropout'] if 'last_layer_dropout' in kwargs else True
self.optimizer_name = None
self.optimizer_state_dict = None
for size in enable_sizes or []:
self.layers[size] = (
HypernetworkModule(size, None, self.layer_structure, self.activation_func, self.weight_init,
self.add_layer_norm, self.use_dropout, self.activate_output, last_layer_dropout=self.last_layer_dropout),
HypernetworkModule(size, None, self.layer_structure, self.activation_func, self.weight_init,
self.add_layer_norm, self.use_dropout, self.activate_output, last_layer_dropout=self.last_layer_dropout),
)
self.eval_mode()
def weights(self):
res = []
for k, layers in self.layers.items():
for layer in layers:
res += layer.parameters()
return res
def train_mode(self):
for k, layers in self.layers.items():
for layer in layers:
layer.train()
for param in layer.parameters():
param.requires_grad = True
def eval_mode(self):
for k, layers in self.layers.items():
for layer in layers:
layer.eval()
for param in layer.parameters():
param.requires_grad = False
def save(self, filename):
state_dict = {}
optimizer_saved_dict = {}
for k, v in self.layers.items():
state_dict[k] = (v[0].state_dict(), v[1].state_dict())
state_dict['step'] = self.step
state_dict['name'] = self.name
state_dict['layer_structure'] = self.layer_structure
state_dict['activation_func'] = self.activation_func
state_dict['is_layer_norm'] = self.add_layer_norm
state_dict['weight_initialization'] = self.weight_init
state_dict['use_dropout'] = self.use_dropout
state_dict['sd_checkpoint'] = self.sd_checkpoint
state_dict['sd_checkpoint_name'] = self.sd_checkpoint_name
state_dict['activate_output'] = self.activate_output
state_dict['last_layer_dropout'] = self.last_layer_dropout
if self.optimizer_name is not None:
optimizer_saved_dict['optimizer_name'] = self.optimizer_name
torch.save(state_dict, filename)
if shared.opts.save_optimizer_state and self.optimizer_state_dict:
optimizer_saved_dict['hash'] = sd_models.model_hash(filename)
optimizer_saved_dict['optimizer_state_dict'] = self.optimizer_state_dict
torch.save(optimizer_saved_dict, filename + '.optim')
def load(self, filename):
self.filename = filename
if self.name is None:
self.name = os.path.splitext(os.path.basename(filename))[0]
state_dict = torch.load(filename, map_location='cpu')
self.layer_structure = state_dict.get('layer_structure', [1, 2, 1])
print(self.layer_structure)
self.activation_func = state_dict.get('activation_func', None)
print(f"Activation function is {self.activation_func}")
self.weight_init = state_dict.get('weight_initialization', 'Normal')
print(f"Weight initialization is {self.weight_init}")
self.add_layer_norm = state_dict.get('is_layer_norm', False)
print(f"Layer norm is set to {self.add_layer_norm}")
self.use_dropout = state_dict.get('use_dropout', False)
print(f"Dropout usage is set to {self.use_dropout}" )
self.activate_output = state_dict.get('activate_output', True)
print(f"Activate last layer is set to {self.activate_output}")
self.last_layer_dropout = state_dict.get('last_layer_dropout', False)
optimizer_saved_dict = torch.load(self.filename + '.optim', map_location = 'cpu') if os.path.exists(self.filename + '.optim') else {}
self.optimizer_name = optimizer_saved_dict.get('optimizer_name', 'AdamW')
print(f"Optimizer name is {self.optimizer_name}")
if sd_models.model_hash(filename) == optimizer_saved_dict.get('hash', None):
self.optimizer_state_dict = optimizer_saved_dict.get('optimizer_state_dict', None)
else:
self.optimizer_state_dict = None
if self.optimizer_state_dict:
print("Loaded existing optimizer from checkpoint")
else:
print("No saved optimizer exists in checkpoint")
for size, sd in state_dict.items():
if type(size) == int:
self.layers[size] = (
HypernetworkModule(size, sd[0], self.layer_structure, self.activation_func, self.weight_init,
self.add_layer_norm, self.use_dropout, self.activate_output, last_layer_dropout=self.last_layer_dropout),
HypernetworkModule(size, sd[1], self.layer_structure, self.activation_func, self.weight_init,
self.add_layer_norm, self.use_dropout, self.activate_output, last_layer_dropout=self.last_layer_dropout),
)
self.name = state_dict.get('name', self.name)
self.step = state_dict.get('step', 0)
self.sd_checkpoint = state_dict.get('sd_checkpoint', None)
self.sd_checkpoint_name = state_dict.get('sd_checkpoint_name', None)
def list_hypernetworks(path):
res = {}
for filename in sorted(glob.iglob(os.path.join(path, '**/*.pt'), recursive=True)):
name = os.path.splitext(os.path.basename(filename))[0]
# Prevent a hypothetical "None.pt" from being listed.
if name != "None":
res[name + f"({sd_models.model_hash(filename)})"] = filename
return res
def load_hypernetwork(filename):
path = shared.hypernetworks.get(filename, None)
# Prevent any file named "None.pt" from being loaded.
if path is not None and filename != "None":
print(f"Loading hypernetwork {filename}")
try:
shared.loaded_hypernetwork = Hypernetwork()
shared.loaded_hypernetwork.load(path)
except Exception:
print(f"Error loading hypernetwork {path}", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
else:
if shared.loaded_hypernetwork is not None:
print(f"Unloading hypernetwork")
shared.loaded_hypernetwork = None
def find_closest_hypernetwork_name(search: str):
if not search:
return None
search = search.lower()
applicable = [name for name in shared.hypernetworks if search in name.lower()]
if not applicable:
return None
applicable = sorted(applicable, key=lambda name: len(name))
return applicable[0]
def apply_hypernetwork(hypernetwork, context, layer=None):
hypernetwork_layers = (hypernetwork.layers if hypernetwork is not None else {}).get(context.shape[2], None)
if hypernetwork_layers is None:
return context, context
if layer is not None:
layer.hyper_k = hypernetwork_layers[0]
layer.hyper_v = hypernetwork_layers[1]
context_k = hypernetwork_layers[0](context)
context_v = hypernetwork_layers[1](context)
return context_k, context_v
def attention_CrossAttention_forward(self, x, context=None, mask=None):
h = self.heads
q = self.to_q(x)
context = default(context, x)
context_k, context_v = apply_hypernetwork(shared.loaded_hypernetwork, context, self)
k = self.to_k(context_k)
v = self.to_v(context_v)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))
sim = einsum('b i d, b j d -> b i j', q, k) * self.scale
if mask is not None:
mask = rearrange(mask, 'b ... -> b (...)')
max_neg_value = -torch.finfo(sim.dtype).max
mask = repeat(mask, 'b j -> (b h) () j', h=h)
sim.masked_fill_(~mask, max_neg_value)
# attention, what we cannot get enough of
attn = sim.softmax(dim=-1)
out = einsum('b i j, b j d -> b i d', attn, v)
out = rearrange(out, '(b h) n d -> b n (h d)', h=h)
return self.to_out(out)
def stack_conds(conds):
if len(conds) == 1:
return torch.stack(conds)
# same as in reconstruct_multicond_batch
token_count = max([x.shape[0] for x in conds])
for i in range(len(conds)):
if conds[i].shape[0] != token_count:
last_vector = conds[i][-1:]
last_vector_repeated = last_vector.repeat([token_count - conds[i].shape[0], 1])
conds[i] = torch.vstack([conds[i], last_vector_repeated])
return torch.stack(conds)
def statistics(data):
if len(data) < 2:
std = 0
else:
std = stdev(data)
total_information = f"loss:{mean(data):.3f}" + u"\u00B1" + f"({std/ (len(data) ** 0.5):.3f})"
recent_data = data[-32:]
if len(recent_data) < 2:
std = 0
else:
std = stdev(recent_data)
recent_information = f"recent 32 loss:{mean(recent_data):.3f}" + u"\u00B1" + f"({std / (len(recent_data) ** 0.5):.3f})"
return total_information, recent_information
def report_statistics(loss_info:dict):
keys = sorted(loss_info.keys(), key=lambda x: sum(loss_info[x]) / len(loss_info[x]))
for key in keys:
try:
print("Loss statistics for file " + key)
info, recent = statistics(list(loss_info[key]))
print(info)
print(recent)
except Exception as e:
print(e)
def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, gradient_step, data_root, log_directory, training_width, training_height, steps, shuffle_tags, tag_drop_out, latent_sampling_method, create_image_every, save_hypernetwork_every, template_file, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height):
# images allows training previews to have infotext. Importing it at the top causes a circular import problem.
from modules import images
save_hypernetwork_every = save_hypernetwork_every or 0
create_image_every = create_image_every or 0
textual_inversion.validate_train_inputs(hypernetwork_name, learn_rate, batch_size, gradient_step, data_root, template_file, steps, save_hypernetwork_every, create_image_every, log_directory, name="hypernetwork")
path = shared.hypernetworks.get(hypernetwork_name, None)
shared.loaded_hypernetwork = Hypernetwork()
shared.loaded_hypernetwork.load(path)
shared.state.textinfo = "Initializing hypernetwork training..."
shared.state.job_count = steps
hypernetwork_name = hypernetwork_name.rsplit('(', 1)[0]
filename = os.path.join(shared.cmd_opts.hypernetwork_dir, f'{hypernetwork_name}.pt')
log_directory = os.path.join(log_directory, datetime.datetime.now().strftime("%Y-%m-%d"), hypernetwork_name)
unload = shared.opts.unload_models_when_training
if save_hypernetwork_every > 0:
hypernetwork_dir = os.path.join(log_directory, "hypernetworks")
os.makedirs(hypernetwork_dir, exist_ok=True)
else:
hypernetwork_dir = None
if create_image_every > 0:
images_dir = os.path.join(log_directory, "images")
os.makedirs(images_dir, exist_ok=True)
else:
images_dir = None
hypernetwork = shared.loaded_hypernetwork
checkpoint = sd_models.select_checkpoint()
initial_step = hypernetwork.step or 0
if initial_step >= steps:
shared.state.textinfo = f"Model has already been trained beyond specified max steps"
return hypernetwork, filename
scheduler = LearnRateScheduler(learn_rate, steps, initial_step)
# dataset loading may take a while, so input validations and early returns should be done before this
shared.state.textinfo = f"Preparing dataset from {html.escape(data_root)}..."
pin_memory = shared.opts.pin_memory
ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=hypernetwork_name, model=shared.sd_model, cond_model=shared.sd_model.cond_stage_model, device=devices.device, template_file=template_file, include_cond=True, batch_size=batch_size, gradient_step=gradient_step, shuffle_tags=shuffle_tags, tag_drop_out=tag_drop_out, latent_sampling_method=latent_sampling_method)
latent_sampling_method = ds.latent_sampling_method
dl = modules.textual_inversion.dataset.PersonalizedDataLoader(ds, latent_sampling_method=latent_sampling_method, batch_size=ds.batch_size, pin_memory=pin_memory)
old_parallel_processing_allowed = shared.parallel_processing_allowed
if unload:
shared.parallel_processing_allowed = False
shared.sd_model.cond_stage_model.to(devices.cpu)
shared.sd_model.first_stage_model.to(devices.cpu)
weights = hypernetwork.weights()
hypernetwork.train_mode()
    # Use the optimizer stored in the saved hypernetwork; this could also be exposed as a UI option.
if hypernetwork.optimizer_name in optimizer_dict:
optimizer = optimizer_dict[hypernetwork.optimizer_name](params=weights, lr=scheduler.learn_rate)
optimizer_name = hypernetwork.optimizer_name
else:
print(f"Optimizer type {hypernetwork.optimizer_name} is not defined!")
optimizer = torch.optim.AdamW(params=weights, lr=scheduler.learn_rate)
optimizer_name = 'AdamW'
if hypernetwork.optimizer_state_dict: # This line must be changed if Optimizer type can be different from saved optimizer.
try:
optimizer.load_state_dict(hypernetwork.optimizer_state_dict)
except RuntimeError as e:
print("Cannot resume from saved optimizer!")
print(e)
scaler = torch.cuda.amp.GradScaler()
batch_size = ds.batch_size
gradient_step = ds.gradient_step
# n steps = batch_size * gradient_step * n image processed
steps_per_epoch = len(ds) // batch_size // gradient_step
max_steps_per_epoch = len(ds) // batch_size - (len(ds) // batch_size) % gradient_step
loss_step = 0
_loss_step = 0 #internal
# size = len(ds.indexes)
# loss_dict = defaultdict(lambda : deque(maxlen = 1024))
# losses = torch.zeros((size,))
# previous_mean_losses = [0]
# previous_mean_loss = 0
# print("Mean loss of {} elements".format(size))
steps_without_grad = 0
last_saved_file = "<none>"
last_saved_image = "<none>"
forced_filename = "<none>"
pbar = tqdm.tqdm(total=steps - initial_step)
try:
for i in range((steps-initial_step) * gradient_step):
if scheduler.finished:
break
if shared.state.interrupted:
break
for j, batch in enumerate(dl):
# works as a drop_last=True for gradient accumulation
if j == max_steps_per_epoch:
break
scheduler.apply(optimizer, hypernetwork.step)
if scheduler.finished:
break
if shared.state.interrupted:
break
with devices.autocast():
x = batch.latent_sample.to(devices.device, non_blocking=pin_memory)
if tag_drop_out != 0 or shuffle_tags:
shared.sd_model.cond_stage_model.to(devices.device)
c = shared.sd_model.cond_stage_model(batch.cond_text).to(devices.device, non_blocking=pin_memory)
shared.sd_model.cond_stage_model.to(devices.cpu)
else:
c = stack_conds(batch.cond).to(devices.device, non_blocking=pin_memory)
loss = shared.sd_model(x, c)[0] / gradient_step
del x
del c
_loss_step += loss.item()
scaler.scale(loss).backward()
# go back until we reach gradient accumulation steps
if (j + 1) % gradient_step != 0:
continue
# print(f"grad:{weights[0].grad.detach().cpu().abs().mean().item():.7f}")
# scaler.unscale_(optimizer)
# print(f"grad:{weights[0].grad.detach().cpu().abs().mean().item():.15f}")
# torch.nn.utils.clip_grad_norm_(weights, max_norm=1.0)
# print(f"grad:{weights[0].grad.detach().cpu().abs().mean().item():.15f}")
scaler.step(optimizer)
scaler.update()
hypernetwork.step += 1
pbar.update()
optimizer.zero_grad(set_to_none=True)
loss_step = _loss_step
_loss_step = 0
steps_done = hypernetwork.step + 1
epoch_num = hypernetwork.step // steps_per_epoch
epoch_step = hypernetwork.step % steps_per_epoch
pbar.set_description(f"[Epoch {epoch_num}: {epoch_step+1}/{steps_per_epoch}]loss: {loss_step:.7f}")
if hypernetwork_dir is not None and steps_done % save_hypernetwork_every == 0:
# Before saving, change name to match current checkpoint.
hypernetwork_name_every = f'{hypernetwork_name}-{steps_done}'
last_saved_file = os.path.join(hypernetwork_dir, f'{hypernetwork_name_every}.pt')
hypernetwork.optimizer_name = optimizer_name
if shared.opts.save_optimizer_state:
hypernetwork.optimizer_state_dict = optimizer.state_dict()
save_hypernetwork(hypernetwork, checkpoint, hypernetwork_name, last_saved_file)
hypernetwork.optimizer_state_dict = None # dereference it after saving, to save memory.
textual_inversion.write_loss(log_directory, "hypernetwork_loss.csv", hypernetwork.step, steps_per_epoch, {
"loss": f"{loss_step:.7f}",
"learn_rate": scheduler.learn_rate
})
if images_dir is not None and steps_done % create_image_every == 0:
forced_filename = f'{hypernetwork_name}-{steps_done}'
last_saved_image = os.path.join(images_dir, forced_filename)
hypernetwork.eval_mode()
shared.sd_model.cond_stage_model.to(devices.device)
shared.sd_model.first_stage_model.to(devices.device)
p = processing.StableDiffusionProcessingTxt2Img(
sd_model=shared.sd_model,
do_not_save_grid=True,
do_not_save_samples=True,
)
if preview_from_txt2img:
p.prompt = preview_prompt
p.negative_prompt = preview_negative_prompt
p.steps = preview_steps
p.sampler_name = sd_samplers.samplers[preview_sampler_index].name
p.cfg_scale = preview_cfg_scale
p.seed = preview_seed
p.width = preview_width
p.height = preview_height
else:
p.prompt = batch.cond_text[0]
p.steps = 20
p.width = training_width
p.height = training_height
preview_text = p.prompt
processed = processing.process_images(p)
image = processed.images[0] if len(processed.images) > 0 else None
if unload:
shared.sd_model.cond_stage_model.to(devices.cpu)
shared.sd_model.first_stage_model.to(devices.cpu)
hypernetwork.train_mode()
if image is not None:
shared.state.current_image = image
last_saved_image, last_text_info = images.save_image(image, images_dir, "", p.seed, p.prompt, shared.opts.samples_format, processed.infotexts[0], p=p, forced_filename=forced_filename, save_to_dirs=False)
last_saved_image += f", prompt: {preview_text}"
shared.state.job_no = hypernetwork.step
shared.state.textinfo = f"""
<p>
Loss: {loss_step:.7f}<br/>
Step: {steps_done}<br/>
Last prompt: {html.escape(batch.cond_text[0])}<br/>
Last saved hypernetwork: {html.escape(last_saved_file)}<br/>
Last saved image: {html.escape(last_saved_image)}<br/>
</p>
"""
except Exception:
print(traceback.format_exc(), file=sys.stderr)
finally:
pbar.leave = False
pbar.close()
hypernetwork.eval_mode()
#report_statistics(loss_dict)
filename = os.path.join(shared.cmd_opts.hypernetwork_dir, f'{hypernetwork_name}.pt')
hypernetwork.optimizer_name = optimizer_name
if shared.opts.save_optimizer_state:
hypernetwork.optimizer_state_dict = optimizer.state_dict()
save_hypernetwork(hypernetwork, checkpoint, hypernetwork_name, filename)
del optimizer
hypernetwork.optimizer_state_dict = None # dereference it after saving, to save memory.
shared.sd_model.cond_stage_model.to(devices.device)
shared.sd_model.first_stage_model.to(devices.device)
shared.parallel_processing_allowed = old_parallel_processing_allowed
return hypernetwork, filename
def save_hypernetwork(hypernetwork, checkpoint, hypernetwork_name, filename):
old_hypernetwork_name = hypernetwork.name
old_sd_checkpoint = hypernetwork.sd_checkpoint if hasattr(hypernetwork, "sd_checkpoint") else None
old_sd_checkpoint_name = hypernetwork.sd_checkpoint_name if hasattr(hypernetwork, "sd_checkpoint_name") else None
try:
hypernetwork.sd_checkpoint = checkpoint.hash
hypernetwork.sd_checkpoint_name = checkpoint.model_name
hypernetwork.name = hypernetwork_name
hypernetwork.save(filename)
except:
hypernetwork.sd_checkpoint = old_sd_checkpoint
hypernetwork.sd_checkpoint_name = old_sd_checkpoint_name
hypernetwork.name = old_hypernetwork_name
raise
<|code_end|>
modules/textual_inversion/textual_inversion.py
<|code_start|>import os
import sys
import traceback
import torch
import tqdm
import html
import datetime
import csv
from PIL import Image, PngImagePlugin
from modules import shared, devices, sd_hijack, processing, sd_models, images, sd_samplers
import modules.textual_inversion.dataset
from modules.textual_inversion.learn_schedule import LearnRateScheduler
from modules.textual_inversion.image_embedding import (embedding_to_b64, embedding_from_b64,
insert_image_data_embed, extract_image_data_embed,
caption_image_overlay)
class Embedding:
def __init__(self, vec, name, step=None):
self.vec = vec
self.name = name
self.step = step
self.cached_checksum = None
self.sd_checkpoint = None
self.sd_checkpoint_name = None
def save(self, filename):
embedding_data = {
"string_to_token": {"*": 265},
"string_to_param": {"*": self.vec},
"name": self.name,
"step": self.step,
"sd_checkpoint": self.sd_checkpoint,
"sd_checkpoint_name": self.sd_checkpoint_name,
}
torch.save(embedding_data, filename)
def checksum(self):
if self.cached_checksum is not None:
return self.cached_checksum
def const_hash(a):
r = 0
for v in a:
r = (r * 281 ^ int(v) * 997) & 0xFFFFFFFF
return r
self.cached_checksum = f'{const_hash(self.vec.reshape(-1) * 100) & 0xffff:04x}'
return self.cached_checksum
class EmbeddingDatabase:
def __init__(self, embeddings_dir):
self.ids_lookup = {}
self.word_embeddings = {}
self.dir_mtime = None
self.embeddings_dir = embeddings_dir
def register_embedding(self, embedding, model):
self.word_embeddings[embedding.name] = embedding
# TODO changing between clip and open clip changes tokenization, which will cause embeddings to stop working
ids = model.cond_stage_model.tokenize([embedding.name])[0]
first_id = ids[0]
if first_id not in self.ids_lookup:
self.ids_lookup[first_id] = []
self.ids_lookup[first_id] = sorted(self.ids_lookup[first_id] + [(ids, embedding)], key=lambda x: len(x[0]), reverse=True)
return embedding
def load_textual_inversion_embeddings(self):
mt = os.path.getmtime(self.embeddings_dir)
if self.dir_mtime is not None and mt <= self.dir_mtime:
return
self.dir_mtime = mt
self.ids_lookup.clear()
self.word_embeddings.clear()
def process_file(path, filename):
name = os.path.splitext(filename)[0]
data = []
if os.path.splitext(filename.upper())[-1] in ['.PNG', '.WEBP', '.JXL', '.AVIF']:
embed_image = Image.open(path)
if hasattr(embed_image, 'text') and 'sd-ti-embedding' in embed_image.text:
data = embedding_from_b64(embed_image.text['sd-ti-embedding'])
name = data.get('name', name)
else:
data = extract_image_data_embed(embed_image)
name = data.get('name', name)
else:
data = torch.load(path, map_location="cpu")
# textual inversion embeddings
if 'string_to_param' in data:
param_dict = data['string_to_param']
if hasattr(param_dict, '_parameters'):
param_dict = getattr(param_dict, '_parameters') # fix for torch 1.12.1 loading saved file from torch 1.11
assert len(param_dict) == 1, 'embedding file has multiple terms in it'
emb = next(iter(param_dict.items()))[1]
# diffuser concepts
elif type(data) == dict and type(next(iter(data.values()))) == torch.Tensor:
assert len(data.keys()) == 1, 'embedding file has multiple terms in it'
emb = next(iter(data.values()))
if len(emb.shape) == 1:
emb = emb.unsqueeze(0)
else:
raise Exception(f"Couldn't identify {filename} as neither textual inversion embedding nor diffuser concept.")
vec = emb.detach().to(devices.device, dtype=torch.float32)
embedding = Embedding(vec, name)
embedding.step = data.get('step', None)
embedding.sd_checkpoint = data.get('sd_checkpoint', None)
embedding.sd_checkpoint_name = data.get('sd_checkpoint_name', None)
self.register_embedding(embedding, shared.sd_model)
for fn in os.listdir(self.embeddings_dir):
try:
fullfn = os.path.join(self.embeddings_dir, fn)
if os.stat(fullfn).st_size == 0:
continue
process_file(fullfn, fn)
except Exception:
print(f"Error loading emedding {fn}:", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
continue
print(f"Loaded a total of {len(self.word_embeddings)} textual inversion embeddings.")
print("Embeddings:", ', '.join(self.word_embeddings.keys()))
def find_embedding_at_position(self, tokens, offset):
token = tokens[offset]
possible_matches = self.ids_lookup.get(token, None)
if possible_matches is None:
return None, None
for ids, embedding in possible_matches:
if tokens[offset:offset + len(ids)] == ids:
return embedding, len(ids)
return None, None
def create_embedding(name, num_vectors_per_token, overwrite_old, init_text='*'):
cond_model = shared.sd_model.cond_stage_model
with devices.autocast():
cond_model([""]) # will send cond model to GPU if lowvram/medvram is active
embedded = cond_model.encode_embedding_init_text(init_text, num_vectors_per_token)
vec = torch.zeros((num_vectors_per_token, embedded.shape[1]), device=devices.device)
for i in range(num_vectors_per_token):
vec[i] = embedded[i * int(embedded.shape[0]) // num_vectors_per_token]
# Remove illegal characters from name.
name = "".join( x for x in name if (x.isalnum() or x in "._- "))
fn = os.path.join(shared.cmd_opts.embeddings_dir, f"{name}.pt")
if not overwrite_old:
assert not os.path.exists(fn), f"file {fn} already exists"
embedding = Embedding(vec, name)
embedding.step = 0
embedding.save(fn)
return fn
def write_loss(log_directory, filename, step, epoch_len, values):
if shared.opts.training_write_csv_every == 0:
return
if step % shared.opts.training_write_csv_every != 0:
return
    write_csv_header = not os.path.exists(os.path.join(log_directory, filename))
with open(os.path.join(log_directory, filename), "a+", newline='') as fout:
csv_writer = csv.DictWriter(fout, fieldnames=["step", "epoch", "epoch_step", *(values.keys())])
if write_csv_header:
csv_writer.writeheader()
epoch = (step - 1) // epoch_len
epoch_step = (step - 1) % epoch_len
csv_writer.writerow({
"step": step,
"epoch": epoch,
"epoch_step": epoch_step,
**values,
})
def validate_train_inputs(model_name, learn_rate, batch_size, gradient_step, data_root, template_file, steps, save_model_every, create_image_every, log_directory, name="embedding"):
assert model_name, f"{name} not selected"
assert learn_rate, "Learning rate is empty or 0"
assert isinstance(batch_size, int), "Batch size must be integer"
assert batch_size > 0, "Batch size must be positive"
assert isinstance(gradient_step, int), "Gradient accumulation step must be integer"
assert gradient_step > 0, "Gradient accumulation step must be positive"
assert data_root, "Dataset directory is empty"
assert os.path.isdir(data_root), "Dataset directory doesn't exist"
assert os.listdir(data_root), "Dataset directory is empty"
assert template_file, "Prompt template file is empty"
assert os.path.isfile(template_file), "Prompt template file doesn't exist"
assert steps, "Max steps is empty or 0"
assert isinstance(steps, int), "Max steps must be integer"
    assert steps > 0, "Max steps must be positive"
    assert isinstance(save_model_every, int), f"Save {name} must be integer"
    assert save_model_every >= 0, f"Save {name} must be positive or 0"
    assert isinstance(create_image_every, int), "Create image must be integer"
    assert create_image_every >= 0, "Create image must be positive or 0"
if save_model_every or create_image_every:
assert log_directory, "Log directory is empty"
def train_embedding(embedding_name, learn_rate, batch_size, gradient_step, data_root, log_directory, training_width, training_height, steps, shuffle_tags, tag_drop_out, latent_sampling_method, create_image_every, save_embedding_every, template_file, save_image_with_stored_embedding, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height):
save_embedding_every = save_embedding_every or 0
create_image_every = create_image_every or 0
validate_train_inputs(embedding_name, learn_rate, batch_size, gradient_step, data_root, template_file, steps, save_embedding_every, create_image_every, log_directory, name="embedding")
shared.state.textinfo = "Initializing textual inversion training..."
shared.state.job_count = steps
filename = os.path.join(shared.cmd_opts.embeddings_dir, f'{embedding_name}.pt')
log_directory = os.path.join(log_directory, datetime.datetime.now().strftime("%Y-%m-%d"), embedding_name)
unload = shared.opts.unload_models_when_training
if save_embedding_every > 0:
embedding_dir = os.path.join(log_directory, "embeddings")
os.makedirs(embedding_dir, exist_ok=True)
else:
embedding_dir = None
if create_image_every > 0:
images_dir = os.path.join(log_directory, "images")
os.makedirs(images_dir, exist_ok=True)
else:
images_dir = None
if create_image_every > 0 and save_image_with_stored_embedding:
images_embeds_dir = os.path.join(log_directory, "image_embeddings")
os.makedirs(images_embeds_dir, exist_ok=True)
else:
images_embeds_dir = None
hijack = sd_hijack.model_hijack
embedding = hijack.embedding_db.word_embeddings[embedding_name]
checkpoint = sd_models.select_checkpoint()
initial_step = embedding.step or 0
if initial_step >= steps:
shared.state.textinfo = f"Model has already been trained beyond specified max steps"
return embedding, filename
scheduler = LearnRateScheduler(learn_rate, steps, initial_step)
# dataset loading may take a while, so input validations and early returns should be done before this
shared.state.textinfo = f"Preparing dataset from {html.escape(data_root)}..."
old_parallel_processing_allowed = shared.parallel_processing_allowed
pin_memory = shared.opts.pin_memory
ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=embedding_name, model=shared.sd_model, cond_model=shared.sd_model.cond_stage_model, device=devices.device, template_file=template_file, batch_size=batch_size, gradient_step=gradient_step, shuffle_tags=shuffle_tags, tag_drop_out=tag_drop_out, latent_sampling_method=latent_sampling_method)
latent_sampling_method = ds.latent_sampling_method
dl = modules.textual_inversion.dataset.PersonalizedDataLoader(ds, latent_sampling_method=latent_sampling_method, batch_size=ds.batch_size, pin_memory=pin_memory)
if unload:
shared.parallel_processing_allowed = False
shared.sd_model.first_stage_model.to(devices.cpu)
embedding.vec.requires_grad = True
optimizer = torch.optim.AdamW([embedding.vec], lr=scheduler.learn_rate, weight_decay=0.0)
scaler = torch.cuda.amp.GradScaler()
batch_size = ds.batch_size
gradient_step = ds.gradient_step
# n steps = batch_size * gradient_step * n image processed
steps_per_epoch = len(ds) // batch_size // gradient_step
max_steps_per_epoch = len(ds) // batch_size - (len(ds) // batch_size) % gradient_step
loss_step = 0
_loss_step = 0 #internal
last_saved_file = "<none>"
last_saved_image = "<none>"
forced_filename = "<none>"
embedding_yet_to_be_embedded = False
pbar = tqdm.tqdm(total=steps - initial_step)
try:
for i in range((steps-initial_step) * gradient_step):
if scheduler.finished:
break
if shared.state.interrupted:
break
for j, batch in enumerate(dl):
# works as a drop_last=True for gradient accumulation
if j == max_steps_per_epoch:
break
scheduler.apply(optimizer, embedding.step)
if scheduler.finished:
break
if shared.state.interrupted:
break
with devices.autocast():
# c = stack_conds(batch.cond).to(devices.device)
# mask = torch.tensor(batch.emb_index).to(devices.device, non_blocking=pin_memory)
# print(mask)
# c[:, 1:1+embedding.vec.shape[0]] = embedding.vec.to(devices.device, non_blocking=pin_memory)
x = batch.latent_sample.to(devices.device, non_blocking=pin_memory)
c = shared.sd_model.cond_stage_model(batch.cond_text)
loss = shared.sd_model(x, c)[0] / gradient_step
del x
_loss_step += loss.item()
scaler.scale(loss).backward()
# go back until we reach gradient accumulation steps
if (j + 1) % gradient_step != 0:
continue
scaler.step(optimizer)
scaler.update()
embedding.step += 1
pbar.update()
optimizer.zero_grad(set_to_none=True)
loss_step = _loss_step
_loss_step = 0
steps_done = embedding.step + 1
epoch_num = embedding.step // steps_per_epoch
epoch_step = embedding.step % steps_per_epoch
pbar.set_description(f"[Epoch {epoch_num}: {epoch_step+1}/{steps_per_epoch}]loss: {loss_step:.7f}")
if embedding_dir is not None and steps_done % save_embedding_every == 0:
# Before saving, change name to match current checkpoint.
embedding_name_every = f'{embedding_name}-{steps_done}'
last_saved_file = os.path.join(embedding_dir, f'{embedding_name_every}.pt')
#if shared.opts.save_optimizer_state:
#embedding.optimizer_state_dict = optimizer.state_dict()
save_embedding(embedding, checkpoint, embedding_name_every, last_saved_file, remove_cached_checksum=True)
embedding_yet_to_be_embedded = True
write_loss(log_directory, "textual_inversion_loss.csv", embedding.step, steps_per_epoch, {
"loss": f"{loss_step:.7f}",
"learn_rate": scheduler.learn_rate
})
if images_dir is not None and steps_done % create_image_every == 0:
forced_filename = f'{embedding_name}-{steps_done}'
last_saved_image = os.path.join(images_dir, forced_filename)
shared.sd_model.first_stage_model.to(devices.device)
p = processing.StableDiffusionProcessingTxt2Img(
sd_model=shared.sd_model,
do_not_save_grid=True,
do_not_save_samples=True,
do_not_reload_embeddings=True,
)
if preview_from_txt2img:
p.prompt = preview_prompt
p.negative_prompt = preview_negative_prompt
p.steps = preview_steps
p.sampler_name = sd_samplers.samplers[preview_sampler_index].name
p.cfg_scale = preview_cfg_scale
p.seed = preview_seed
p.width = preview_width
p.height = preview_height
else:
p.prompt = batch.cond_text[0]
p.steps = 20
p.width = training_width
p.height = training_height
preview_text = p.prompt
processed = processing.process_images(p)
image = processed.images[0] if len(processed.images) > 0 else None
if unload:
shared.sd_model.first_stage_model.to(devices.cpu)
if image is not None:
shared.state.current_image = image
last_saved_image, last_text_info = images.save_image(image, images_dir, "", p.seed, p.prompt, shared.opts.samples_format, processed.infotexts[0], p=p, forced_filename=forced_filename, save_to_dirs=False)
last_saved_image += f", prompt: {preview_text}"
if save_image_with_stored_embedding and os.path.exists(last_saved_file) and embedding_yet_to_be_embedded:
last_saved_image_chunks = os.path.join(images_embeds_dir, f'{embedding_name}-{steps_done}.png')
info = PngImagePlugin.PngInfo()
data = torch.load(last_saved_file)
info.add_text("sd-ti-embedding", embedding_to_b64(data))
title = "<{}>".format(data.get('name', '???'))
try:
vectorSize = list(data['string_to_param'].values())[0].shape[0]
except Exception as e:
vectorSize = '?'
checkpoint = sd_models.select_checkpoint()
footer_left = checkpoint.model_name
footer_mid = '[{}]'.format(checkpoint.hash)
footer_right = '{}v {}s'.format(vectorSize, steps_done)
captioned_image = caption_image_overlay(image, title, footer_left, footer_mid, footer_right)
captioned_image = insert_image_data_embed(captioned_image, data)
captioned_image.save(last_saved_image_chunks, "PNG", pnginfo=info)
embedding_yet_to_be_embedded = False
last_saved_image, last_text_info = images.save_image(image, images_dir, "", p.seed, p.prompt, shared.opts.samples_format, processed.infotexts[0], p=p, forced_filename=forced_filename, save_to_dirs=False)
last_saved_image += f", prompt: {preview_text}"
shared.state.job_no = embedding.step
shared.state.textinfo = f"""
<p>
Loss: {loss_step:.7f}<br/>
Step: {steps_done}<br/>
Last prompt: {html.escape(batch.cond_text[0])}<br/>
Last saved embedding: {html.escape(last_saved_file)}<br/>
Last saved image: {html.escape(last_saved_image)}<br/>
</p>
"""
filename = os.path.join(shared.cmd_opts.embeddings_dir, f'{embedding_name}.pt')
save_embedding(embedding, checkpoint, embedding_name, filename, remove_cached_checksum=True)
except Exception:
print(traceback.format_exc(), file=sys.stderr)
pass
finally:
pbar.leave = False
pbar.close()
shared.sd_model.first_stage_model.to(devices.device)
shared.parallel_processing_allowed = old_parallel_processing_allowed
return embedding, filename
def save_embedding(embedding, checkpoint, embedding_name, filename, remove_cached_checksum=True):
old_embedding_name = embedding.name
old_sd_checkpoint = embedding.sd_checkpoint if hasattr(embedding, "sd_checkpoint") else None
old_sd_checkpoint_name = embedding.sd_checkpoint_name if hasattr(embedding, "sd_checkpoint_name") else None
old_cached_checksum = embedding.cached_checksum if hasattr(embedding, "cached_checksum") else None
try:
embedding.sd_checkpoint = checkpoint.hash
embedding.sd_checkpoint_name = checkpoint.model_name
if remove_cached_checksum:
embedding.cached_checksum = None
embedding.name = embedding_name
embedding.save(filename)
except:
embedding.sd_checkpoint = old_sd_checkpoint
embedding.sd_checkpoint_name = old_sd_checkpoint_name
embedding.name = old_embedding_name
embedding.cached_checksum = old_cached_checksum
raise
<|code_end|>
|
[Feature Request]: Add newest DPM-Solver++
### Is there an existing issue for this?
- [X] I have searched the existing issues and checked the recent builds/commits
### What would your feature do?
Add support for the newest DPM-Solver++, a state-of-the-art fast sampler for guided sampling with diffusion models.
DPM-Solver++ can achieve great sample quality in only 15 to 20 steps.
References:
https://github.com/LuChengTHU/dpm-solver
https://arxiv.org/abs/2211.01095
https://github.com/huggingface/diffusers/pull/1132
https://github.com/CompVis/stable-diffusion/pull/440
### Proposed workflow
I can help to add the algorithm code.
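As a rough sketch of the registration side only (the solver code itself would come from k-diffusion), the new samplers could be appended to samplers_k_diffusion in modules/sd_samplers.py; the existing hasattr(k_diffusion.sampling, funcname) filter already skips entries that the installed k-diffusion revision does not provide:

# Sketch: possible additions to samplers_k_diffusion in modules/sd_samplers.py,
# assuming the pinned k-diffusion revision exposes these sampling functions.
('DPM-Solver++(2S) a', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a'], {}),
('DPM-Solver++(2M)', 'sample_dpmpp_2m', ['k_dpmpp_2m'], {}),
('DPM-Solver++(2S) a Karras', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a_ka'], {'scheduler': 'karras'}),
('DPM-Solver++(2M) Karras', 'sample_dpmpp_2m', ['k_dpmpp_2m_ka'], {'scheduler': 'karras'}),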
### Additional information
_No response_
|
launch.py
<|code_start|># this scripts installs necessary requirements and launches main program in webui.py
import subprocess
import os
import sys
import importlib.util
import shlex
import platform
dir_repos = "repositories"
dir_extensions = "extensions"
python = sys.executable
git = os.environ.get('GIT', "git")
index_url = os.environ.get('INDEX_URL', "")
def extract_arg(args, name):
return [x for x in args if x != name], name in args
def run(command, desc=None, errdesc=None, custom_env=None):
if desc is not None:
print(desc)
result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, env=os.environ if custom_env is None else custom_env)
if result.returncode != 0:
message = f"""{errdesc or 'Error running command'}.
Command: {command}
Error code: {result.returncode}
stdout: {result.stdout.decode(encoding="utf8", errors="ignore") if len(result.stdout)>0 else '<empty>'}
stderr: {result.stderr.decode(encoding="utf8", errors="ignore") if len(result.stderr)>0 else '<empty>'}
"""
raise RuntimeError(message)
return result.stdout.decode(encoding="utf8", errors="ignore")
def check_run(command):
result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
return result.returncode == 0
def is_installed(package):
try:
spec = importlib.util.find_spec(package)
except ModuleNotFoundError:
return False
return spec is not None
def repo_dir(name):
return os.path.join(dir_repos, name)
def run_python(code, desc=None, errdesc=None):
return run(f'"{python}" -c "{code}"', desc, errdesc)
def run_pip(args, desc=None):
index_url_line = f' --index-url {index_url}' if index_url != '' else ''
return run(f'"{python}" -m pip {args} --prefer-binary{index_url_line}', desc=f"Installing {desc}", errdesc=f"Couldn't install {desc}")
def check_run_python(code):
return check_run(f'"{python}" -c "{code}"')
def git_clone(url, dir, name, commithash=None):
# TODO clone into temporary dir and move if successful
if os.path.exists(dir):
if commithash is None:
return
current_hash = run(f'"{git}" -C {dir} rev-parse HEAD', None, f"Couldn't determine {name}'s hash: {commithash}").strip()
if current_hash == commithash:
return
run(f'"{git}" -C {dir} fetch', f"Fetching updates for {name}...", f"Couldn't fetch {name}")
run(f'"{git}" -C {dir} checkout {commithash}', f"Checking out commit for {name} with hash: {commithash}...", f"Couldn't checkout commit {commithash} for {name}")
return
run(f'"{git}" clone "{url}" "{dir}"', f"Cloning {name} into {dir}...", f"Couldn't clone {name}")
if commithash is not None:
run(f'"{git}" -C {dir} checkout {commithash}', None, "Couldn't checkout {name}'s hash: {commithash}")
def version_check(commit):
try:
import requests
commits = requests.get('https://api.github.com/repos/AUTOMATIC1111/stable-diffusion-webui/branches/master').json()
if commit != "<none>" and commits['commit']['sha'] != commit:
print("--------------------------------------------------------")
print("| You are not up to date with the most recent release. |")
print("| Consider running `git pull` to update. |")
print("--------------------------------------------------------")
elif commits['commit']['sha'] == commit:
print("You are up to date with the most recent release.")
else:
print("Not a git clone, can't perform version check.")
except Exception as e:
print("version check failed", e)
def run_extensions_installers():
if not os.path.isdir(dir_extensions):
return
for dirname_extension in os.listdir(dir_extensions):
path_installer = os.path.join(dir_extensions, dirname_extension, "install.py")
if not os.path.isfile(path_installer):
continue
try:
env = os.environ.copy()
env['PYTHONPATH'] = os.path.abspath(".")
print(run(f'"{python}" "{path_installer}"', errdesc=f"Error running install.py for extension {dirname_extension}", custom_env=env))
except Exception as e:
print(e, file=sys.stderr)
def prepare_enviroment():
torch_command = os.environ.get('TORCH_COMMAND', "pip install torch==1.12.1+cu113 torchvision==0.13.1+cu113 --extra-index-url https://download.pytorch.org/whl/cu113")
requirements_file = os.environ.get('REQS_FILE', "requirements_versions.txt")
commandline_args = os.environ.get('COMMANDLINE_ARGS', "")
gfpgan_package = os.environ.get('GFPGAN_PACKAGE', "git+https://github.com/TencentARC/GFPGAN.git@8d2447a2d918f8eba5a4a01463fd48e45126a379")
clip_package = os.environ.get('CLIP_PACKAGE', "git+https://github.com/openai/CLIP.git@d50d76daa670286dd6cacf3bcd80b5e4823fc8e1")
deepdanbooru_package = os.environ.get('DEEPDANBOORU_PACKAGE', "git+https://github.com/KichangKim/DeepDanbooru.git@d91a2963bf87c6a770d74894667e9ffa9f6de7ff")
xformers_windows_package = os.environ.get('XFORMERS_WINDOWS_PACKAGE', 'https://github.com/C43H66N12O12S2/stable-diffusion-webui/releases/download/f/xformers-0.0.14.dev0-cp310-cp310-win_amd64.whl')
stable_diffusion_repo = os.environ.get('STABLE_DIFFUSION_REPO', "https://github.com/CompVis/stable-diffusion.git")
taming_transformers_repo = os.environ.get('TAMING_REANSFORMERS_REPO', "https://github.com/CompVis/taming-transformers.git")
k_diffusion_repo = os.environ.get('K_DIFFUSION_REPO', 'https://github.com/crowsonkb/k-diffusion.git')
codeformer_repo = os.environ.get('CODEFORMET_REPO', 'https://github.com/sczhou/CodeFormer.git')
blip_repo = os.environ.get('BLIP_REPO', 'https://github.com/salesforce/BLIP.git')
stable_diffusion_commit_hash = os.environ.get('STABLE_DIFFUSION_COMMIT_HASH', "69ae4b35e0a0f6ee1af8bb9a5d0016ccb27e36dc")
taming_transformers_commit_hash = os.environ.get('TAMING_TRANSFORMERS_COMMIT_HASH', "24268930bf1dce879235a7fddd0b2355b84d7ea6")
k_diffusion_commit_hash = os.environ.get('K_DIFFUSION_COMMIT_HASH', "f4e99857772fc3a126ba886aadf795a332774878")
codeformer_commit_hash = os.environ.get('CODEFORMER_COMMIT_HASH', "c5b4593074ba6214284d6acd5f1719b6c5d739af")
blip_commit_hash = os.environ.get('BLIP_COMMIT_HASH', "48211a1594f1321b00f14c9f7a5b4813144b2fb9")
sys.argv += shlex.split(commandline_args)
test_argv = [x for x in sys.argv if x != '--tests']
sys.argv, skip_torch_cuda_test = extract_arg(sys.argv, '--skip-torch-cuda-test')
sys.argv, reinstall_xformers = extract_arg(sys.argv, '--reinstall-xformers')
sys.argv, update_check = extract_arg(sys.argv, '--update-check')
sys.argv, run_tests = extract_arg(sys.argv, '--tests')
xformers = '--xformers' in sys.argv
deepdanbooru = '--deepdanbooru' in sys.argv
ngrok = '--ngrok' in sys.argv
try:
commit = run(f"{git} rev-parse HEAD").strip()
except Exception:
commit = "<none>"
print(f"Python {sys.version}")
print(f"Commit hash: {commit}")
if not is_installed("torch") or not is_installed("torchvision"):
run(f'"{python}" -m {torch_command}', "Installing torch and torchvision", "Couldn't install torch")
if not skip_torch_cuda_test:
run_python("import torch; assert torch.cuda.is_available(), 'Torch is not able to use GPU; add --skip-torch-cuda-test to COMMANDLINE_ARGS variable to disable this check'")
if not is_installed("gfpgan"):
run_pip(f"install {gfpgan_package}", "gfpgan")
if not is_installed("clip"):
run_pip(f"install {clip_package}", "clip")
if (not is_installed("xformers") or reinstall_xformers) and xformers:
if platform.system() == "Windows":
if platform.python_version().startswith("3.10"):
run_pip(f"install -U -I --no-deps {xformers_windows_package}", "xformers")
else:
print("Installation of xformers is not supported in this version of Python.")
print("You can also check this and build manually: https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Xformers#building-xformers-on-windows-by-duckness")
if not is_installed("xformers"):
exit(0)
elif platform.system() == "Linux":
run_pip("install xformers", "xformers")
if not is_installed("deepdanbooru") and deepdanbooru:
run_pip(f"install {deepdanbooru_package}#egg=deepdanbooru[tensorflow] tensorflow==2.10.0 tensorflow-io==0.27.0", "deepdanbooru")
if not is_installed("pyngrok") and ngrok:
run_pip("install pyngrok", "ngrok")
os.makedirs(dir_repos, exist_ok=True)
git_clone(stable_diffusion_repo, repo_dir('stable-diffusion'), "Stable Diffusion", stable_diffusion_commit_hash)
git_clone(taming_transformers_repo, repo_dir('taming-transformers'), "Taming Transformers", taming_transformers_commit_hash)
git_clone(k_diffusion_repo, repo_dir('k-diffusion'), "K-diffusion", k_diffusion_commit_hash)
git_clone(codeformer_repo, repo_dir('CodeFormer'), "CodeFormer", codeformer_commit_hash)
git_clone(blip_repo, repo_dir('BLIP'), "BLIP", blip_commit_hash)
if not is_installed("lpips"):
run_pip(f"install -r {os.path.join(repo_dir('CodeFormer'), 'requirements.txt')}", "requirements for CodeFormer")
run_pip(f"install -r {requirements_file}", "requirements for Web UI")
run_extensions_installers()
if update_check:
version_check(commit)
if "--exit" in sys.argv:
print("Exiting because of --exit argument")
exit(0)
if run_tests:
tests(test_argv)
exit(0)
def tests(argv):
if "--api" not in argv:
argv.append("--api")
print(f"Launching Web UI in another process for testing with arguments: {' '.join(argv[1:])}")
with open('test/stdout.txt', "w", encoding="utf8") as stdout, open('test/stderr.txt', "w", encoding="utf8") as stderr:
proc = subprocess.Popen([sys.executable, *argv], stdout=stdout, stderr=stderr)
import test.server_poll
test.server_poll.run_tests()
print(f"Stopping Web UI process with id {proc.pid}")
proc.kill()
def start():
print(f"Launching {'API server' if '--nowebui' in sys.argv else 'Web UI'} with arguments: {' '.join(sys.argv[1:])}")
import webui
if '--nowebui' in sys.argv:
webui.api_only()
else:
webui.webui()
if __name__ == "__main__":
prepare_enviroment()
start()
<|code_end|>
modules/sd_samplers.py
<|code_start|>from collections import namedtuple
import numpy as np
from math import floor
import torch
import tqdm
from PIL import Image
import inspect
import k_diffusion.sampling
import ldm.models.diffusion.ddim
import ldm.models.diffusion.plms
from modules import prompt_parser, devices, processing, images
from modules.shared import opts, cmd_opts, state
import modules.shared as shared
from modules.script_callbacks import CFGDenoiserParams, cfg_denoiser_callback
SamplerData = namedtuple('SamplerData', ['name', 'constructor', 'aliases', 'options'])
samplers_k_diffusion = [
('Euler a', 'sample_euler_ancestral', ['k_euler_a'], {}),
('Euler', 'sample_euler', ['k_euler'], {}),
('LMS', 'sample_lms', ['k_lms'], {}),
('Heun', 'sample_heun', ['k_heun'], {}),
('DPM2', 'sample_dpm_2', ['k_dpm_2'], {}),
('DPM2 a', 'sample_dpm_2_ancestral', ['k_dpm_2_a'], {}),
('DPM fast', 'sample_dpm_fast', ['k_dpm_fast'], {}),
('DPM adaptive', 'sample_dpm_adaptive', ['k_dpm_ad'], {}),
('LMS Karras', 'sample_lms', ['k_lms_ka'], {'scheduler': 'karras'}),
('DPM2 Karras', 'sample_dpm_2', ['k_dpm_2_ka'], {'scheduler': 'karras'}),
('DPM2 a Karras', 'sample_dpm_2_ancestral', ['k_dpm_2_a_ka'], {'scheduler': 'karras'}),
]
samplers_data_k_diffusion = [
SamplerData(label, lambda model, funcname=funcname: KDiffusionSampler(funcname, model), aliases, options)
for label, funcname, aliases, options in samplers_k_diffusion
if hasattr(k_diffusion.sampling, funcname)
]
all_samplers = [
*samplers_data_k_diffusion,
SamplerData('DDIM', lambda model: VanillaStableDiffusionSampler(ldm.models.diffusion.ddim.DDIMSampler, model), [], {}),
SamplerData('PLMS', lambda model: VanillaStableDiffusionSampler(ldm.models.diffusion.plms.PLMSSampler, model), [], {}),
]
samplers = []
samplers_for_img2img = []
def create_sampler_with_index(list_of_configs, index, model):
config = list_of_configs[index]
sampler = config.constructor(model)
sampler.config = config
return sampler
def set_samplers():
global samplers, samplers_for_img2img
hidden = set(opts.hide_samplers)
hidden_img2img = set(opts.hide_samplers + ['PLMS'])
samplers = [x for x in all_samplers if x.name not in hidden]
samplers_for_img2img = [x for x in all_samplers if x.name not in hidden_img2img]
set_samplers()
sampler_extra_params = {
'sample_euler': ['s_churn', 's_tmin', 's_tmax', 's_noise'],
'sample_heun': ['s_churn', 's_tmin', 's_tmax', 's_noise'],
'sample_dpm_2': ['s_churn', 's_tmin', 's_tmax', 's_noise'],
}
def setup_img2img_steps(p, steps=None):
if opts.img2img_fix_steps or steps is not None:
steps = int((steps or p.steps) / min(p.denoising_strength, 0.999)) if p.denoising_strength > 0 else 0
t_enc = p.steps - 1
else:
steps = p.steps
t_enc = int(min(p.denoising_strength, 0.999) * steps)
return steps, t_enc
def single_sample_to_image(sample):
x_sample = processing.decode_first_stage(shared.sd_model, sample.unsqueeze(0))[0]
x_sample = torch.clamp((x_sample + 1.0) / 2.0, min=0.0, max=1.0)
x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2)
x_sample = x_sample.astype(np.uint8)
return Image.fromarray(x_sample)
def sample_to_image(samples, index=0):
return single_sample_to_image(samples[index])
def samples_to_image_grid(samples):
return images.image_grid([single_sample_to_image(sample) for sample in samples])
def store_latent(decoded):
state.current_latent = decoded
if opts.show_progress_every_n_steps > 0 and shared.state.sampling_step % opts.show_progress_every_n_steps == 0:
if not shared.parallel_processing_allowed:
shared.state.current_image = sample_to_image(decoded)
class InterruptedException(BaseException):
pass
class VanillaStableDiffusionSampler:
def __init__(self, constructor, sd_model):
self.sampler = constructor(sd_model)
self.orig_p_sample_ddim = self.sampler.p_sample_ddim if hasattr(self.sampler, 'p_sample_ddim') else self.sampler.p_sample_plms
self.mask = None
self.nmask = None
self.init_latent = None
self.sampler_noises = None
self.step = 0
self.stop_at = None
self.eta = None
self.default_eta = 0.0
self.config = None
self.last_latent = None
self.conditioning_key = sd_model.model.conditioning_key
def number_of_needed_noises(self, p):
return 0
def launch_sampling(self, steps, func):
state.sampling_steps = steps
state.sampling_step = 0
try:
return func()
except InterruptedException:
return self.last_latent
def p_sample_ddim_hook(self, x_dec, cond, ts, unconditional_conditioning, *args, **kwargs):
if state.interrupted or state.skipped:
raise InterruptedException
if self.stop_at is not None and self.step > self.stop_at:
raise InterruptedException
# Have to unwrap the inpainting conditioning here to perform pre-processing
image_conditioning = None
if isinstance(cond, dict):
image_conditioning = cond["c_concat"][0]
cond = cond["c_crossattn"][0]
unconditional_conditioning = unconditional_conditioning["c_crossattn"][0]
conds_list, tensor = prompt_parser.reconstruct_multicond_batch(cond, self.step)
unconditional_conditioning = prompt_parser.reconstruct_cond_batch(unconditional_conditioning, self.step)
assert all([len(conds) == 1 for conds in conds_list]), 'composition via AND is not supported for DDIM/PLMS samplers'
cond = tensor
# for DDIM, shapes must match, we can't just process cond and uncond independently;
# filling unconditional_conditioning with repeats of the last vector to match length is
# not 100% correct but should work well enough
if unconditional_conditioning.shape[1] < cond.shape[1]:
last_vector = unconditional_conditioning[:, -1:]
last_vector_repeated = last_vector.repeat([1, cond.shape[1] - unconditional_conditioning.shape[1], 1])
unconditional_conditioning = torch.hstack([unconditional_conditioning, last_vector_repeated])
elif unconditional_conditioning.shape[1] > cond.shape[1]:
unconditional_conditioning = unconditional_conditioning[:, :cond.shape[1]]
if self.mask is not None:
img_orig = self.sampler.model.q_sample(self.init_latent, ts)
x_dec = img_orig * self.mask + self.nmask * x_dec
# Wrap the image conditioning back up since the DDIM code can accept the dict directly.
# Note that they need to be lists because it just concatenates them later.
if image_conditioning is not None:
cond = {"c_concat": [image_conditioning], "c_crossattn": [cond]}
unconditional_conditioning = {"c_concat": [image_conditioning], "c_crossattn": [unconditional_conditioning]}
res = self.orig_p_sample_ddim(x_dec, cond, ts, unconditional_conditioning=unconditional_conditioning, *args, **kwargs)
if self.mask is not None:
self.last_latent = self.init_latent * self.mask + self.nmask * res[1]
else:
self.last_latent = res[1]
store_latent(self.last_latent)
self.step += 1
state.sampling_step = self.step
shared.total_tqdm.update()
return res
def initialize(self, p):
self.eta = p.eta if p.eta is not None else opts.eta_ddim
for fieldname in ['p_sample_ddim', 'p_sample_plms']:
if hasattr(self.sampler, fieldname):
setattr(self.sampler, fieldname, self.p_sample_ddim_hook)
self.mask = p.mask if hasattr(p, 'mask') else None
self.nmask = p.nmask if hasattr(p, 'nmask') else None
def adjust_steps_if_invalid(self, p, num_steps):
if (self.config.name == 'DDIM' and p.ddim_discretize == 'uniform') or (self.config.name == 'PLMS'):
valid_step = 999 / (1000 // num_steps)
if valid_step == floor(valid_step):
return int(valid_step) + 1
return num_steps
def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning, steps=None, image_conditioning=None):
steps, t_enc = setup_img2img_steps(p, steps)
steps = self.adjust_steps_if_invalid(p, steps)
self.initialize(p)
self.sampler.make_schedule(ddim_num_steps=steps, ddim_eta=self.eta, ddim_discretize=p.ddim_discretize, verbose=False)
x1 = self.sampler.stochastic_encode(x, torch.tensor([t_enc] * int(x.shape[0])).to(shared.device), noise=noise)
self.init_latent = x
self.last_latent = x
self.step = 0
# Wrap the conditioning models with additional image conditioning for inpainting model
if image_conditioning is not None:
conditioning = {"c_concat": [image_conditioning], "c_crossattn": [conditioning]}
unconditional_conditioning = {"c_concat": [image_conditioning], "c_crossattn": [unconditional_conditioning]}
samples = self.launch_sampling(t_enc + 1, lambda: self.sampler.decode(x1, conditioning, t_enc, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning))
return samples
def sample(self, p, x, conditioning, unconditional_conditioning, steps=None, image_conditioning=None):
self.initialize(p)
self.init_latent = None
self.last_latent = x
self.step = 0
steps = self.adjust_steps_if_invalid(p, steps or p.steps)
# Wrap the conditioning models with additional image conditioning for inpainting model
if image_conditioning is not None:
conditioning = {"c_concat": [image_conditioning], "c_crossattn": [conditioning]}
unconditional_conditioning = {"c_concat": [image_conditioning], "c_crossattn": [unconditional_conditioning]}
samples_ddim = self.launch_sampling(steps, lambda: self.sampler.sample(S=steps, conditioning=conditioning, batch_size=int(x.shape[0]), shape=x[0].shape, verbose=False, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning, x_T=x, eta=self.eta)[0])
return samples_ddim
class CFGDenoiser(torch.nn.Module):
def __init__(self, model):
super().__init__()
self.inner_model = model
self.mask = None
self.nmask = None
self.init_latent = None
self.step = 0
def forward(self, x, sigma, uncond, cond, cond_scale, image_cond):
if state.interrupted or state.skipped:
raise InterruptedException
conds_list, tensor = prompt_parser.reconstruct_multicond_batch(cond, self.step)
uncond = prompt_parser.reconstruct_cond_batch(uncond, self.step)
batch_size = len(conds_list)
repeats = [len(conds_list[i]) for i in range(batch_size)]
x_in = torch.cat([torch.stack([x[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [x])
image_cond_in = torch.cat([torch.stack([image_cond[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [image_cond])
sigma_in = torch.cat([torch.stack([sigma[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [sigma])
denoiser_params = CFGDenoiserParams(x_in, image_cond_in, sigma_in, state.sampling_step, state.sampling_steps)
cfg_denoiser_callback(denoiser_params)
x_in = denoiser_params.x
image_cond_in = denoiser_params.image_cond
sigma_in = denoiser_params.sigma
if tensor.shape[1] == uncond.shape[1]:
cond_in = torch.cat([tensor, uncond])
if shared.batch_cond_uncond:
x_out = self.inner_model(x_in, sigma_in, cond={"c_crossattn": [cond_in], "c_concat": [image_cond_in]})
else:
x_out = torch.zeros_like(x_in)
for batch_offset in range(0, x_out.shape[0], batch_size):
a = batch_offset
b = a + batch_size
x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond={"c_crossattn": [cond_in[a:b]], "c_concat": [image_cond_in[a:b]]})
else:
x_out = torch.zeros_like(x_in)
batch_size = batch_size*2 if shared.batch_cond_uncond else batch_size
for batch_offset in range(0, tensor.shape[0], batch_size):
a = batch_offset
b = min(a + batch_size, tensor.shape[0])
x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond={"c_crossattn": [tensor[a:b]], "c_concat": [image_cond_in[a:b]]})
x_out[-uncond.shape[0]:] = self.inner_model(x_in[-uncond.shape[0]:], sigma_in[-uncond.shape[0]:], cond={"c_crossattn": [uncond], "c_concat": [image_cond_in[-uncond.shape[0]:]]})
denoised_uncond = x_out[-uncond.shape[0]:]
denoised = torch.clone(denoised_uncond)
for i, conds in enumerate(conds_list):
for cond_index, weight in conds:
denoised[i] += (x_out[cond_index] - denoised_uncond[i]) * (weight * cond_scale)
if self.mask is not None:
denoised = self.init_latent * self.mask + self.nmask * denoised
self.step += 1
return denoised
class TorchHijack:
def __init__(self, kdiff_sampler):
self.kdiff_sampler = kdiff_sampler
def __getattr__(self, item):
if item == 'randn_like':
return self.kdiff_sampler.randn_like
if hasattr(torch, item):
return getattr(torch, item)
raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, item))
class KDiffusionSampler:
def __init__(self, funcname, sd_model):
self.model_wrap = k_diffusion.external.CompVisDenoiser(sd_model, quantize=shared.opts.enable_quantization)
self.funcname = funcname
self.func = getattr(k_diffusion.sampling, self.funcname)
self.extra_params = sampler_extra_params.get(funcname, [])
self.model_wrap_cfg = CFGDenoiser(self.model_wrap)
self.sampler_noises = None
self.sampler_noise_index = 0
self.stop_at = None
self.eta = None
self.default_eta = 1.0
self.config = None
self.last_latent = None
self.conditioning_key = sd_model.model.conditioning_key
def callback_state(self, d):
step = d['i']
latent = d["denoised"]
store_latent(latent)
self.last_latent = latent
if self.stop_at is not None and step > self.stop_at:
raise InterruptedException
state.sampling_step = step
shared.total_tqdm.update()
def launch_sampling(self, steps, func):
state.sampling_steps = steps
state.sampling_step = 0
try:
return func()
except InterruptedException:
return self.last_latent
def number_of_needed_noises(self, p):
return p.steps
def randn_like(self, x):
noise = self.sampler_noises[self.sampler_noise_index] if self.sampler_noises is not None and self.sampler_noise_index < len(self.sampler_noises) else None
if noise is not None and x.shape == noise.shape:
res = noise
else:
res = torch.randn_like(x)
self.sampler_noise_index += 1
return res
def initialize(self, p):
self.model_wrap_cfg.mask = p.mask if hasattr(p, 'mask') else None
self.model_wrap_cfg.nmask = p.nmask if hasattr(p, 'nmask') else None
self.model_wrap.step = 0
self.sampler_noise_index = 0
self.eta = p.eta or opts.eta_ancestral
if self.sampler_noises is not None:
k_diffusion.sampling.torch = TorchHijack(self)
extra_params_kwargs = {}
for param_name in self.extra_params:
if hasattr(p, param_name) and param_name in inspect.signature(self.func).parameters:
extra_params_kwargs[param_name] = getattr(p, param_name)
if 'eta' in inspect.signature(self.func).parameters:
extra_params_kwargs['eta'] = self.eta
return extra_params_kwargs
def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning, steps=None, image_conditioning=None):
steps, t_enc = setup_img2img_steps(p, steps)
if p.sampler_noise_scheduler_override:
sigmas = p.sampler_noise_scheduler_override(steps)
elif self.config is not None and self.config.options.get('scheduler', None) == 'karras':
sigmas = k_diffusion.sampling.get_sigmas_karras(n=steps, sigma_min=0.1, sigma_max=10, device=shared.device)
else:
sigmas = self.model_wrap.get_sigmas(steps)
sigma_sched = sigmas[steps - t_enc - 1:]
xi = x + noise * sigma_sched[0]
extra_params_kwargs = self.initialize(p)
if 'sigma_min' in inspect.signature(self.func).parameters:
## last sigma is zero which isn't allowed by DPM Fast & Adaptive so taking value before last
extra_params_kwargs['sigma_min'] = sigma_sched[-2]
if 'sigma_max' in inspect.signature(self.func).parameters:
extra_params_kwargs['sigma_max'] = sigma_sched[0]
if 'n' in inspect.signature(self.func).parameters:
extra_params_kwargs['n'] = len(sigma_sched) - 1
if 'sigma_sched' in inspect.signature(self.func).parameters:
extra_params_kwargs['sigma_sched'] = sigma_sched
if 'sigmas' in inspect.signature(self.func).parameters:
extra_params_kwargs['sigmas'] = sigma_sched
self.model_wrap_cfg.init_latent = x
self.last_latent = x
samples = self.launch_sampling(t_enc + 1, lambda: self.func(self.model_wrap_cfg, xi, extra_args={
'cond': conditioning,
'image_cond': image_conditioning,
'uncond': unconditional_conditioning,
'cond_scale': p.cfg_scale
}, disable=False, callback=self.callback_state, **extra_params_kwargs))
return samples
def sample(self, p, x, conditioning, unconditional_conditioning, steps=None, image_conditioning = None):
steps = steps or p.steps
if p.sampler_noise_scheduler_override:
sigmas = p.sampler_noise_scheduler_override(steps)
elif self.config is not None and self.config.options.get('scheduler', None) == 'karras':
sigmas = k_diffusion.sampling.get_sigmas_karras(n=steps, sigma_min=0.1, sigma_max=10, device=shared.device)
else:
sigmas = self.model_wrap.get_sigmas(steps)
x = x * sigmas[0]
extra_params_kwargs = self.initialize(p)
if 'sigma_min' in inspect.signature(self.func).parameters:
extra_params_kwargs['sigma_min'] = self.model_wrap.sigmas[0].item()
extra_params_kwargs['sigma_max'] = self.model_wrap.sigmas[-1].item()
if 'n' in inspect.signature(self.func).parameters:
extra_params_kwargs['n'] = steps
else:
extra_params_kwargs['sigmas'] = sigmas
self.last_latent = x
samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args={
'cond': conditioning,
'image_cond': image_conditioning,
'uncond': unconditional_conditioning,
'cond_scale': p.cfg_scale
}, disable=False, callback=self.callback_state, **extra_params_kwargs))
return samples
<|code_end|>
|
launch.py
<|code_start|># this scripts installs necessary requirements and launches main program in webui.py
import subprocess
import os
import sys
import importlib.util
import shlex
import platform
dir_repos = "repositories"
dir_extensions = "extensions"
python = sys.executable
git = os.environ.get('GIT', "git")
index_url = os.environ.get('INDEX_URL', "")
def extract_arg(args, name):
return [x for x in args if x != name], name in args
def run(command, desc=None, errdesc=None, custom_env=None):
if desc is not None:
print(desc)
result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, env=os.environ if custom_env is None else custom_env)
if result.returncode != 0:
message = f"""{errdesc or 'Error running command'}.
Command: {command}
Error code: {result.returncode}
stdout: {result.stdout.decode(encoding="utf8", errors="ignore") if len(result.stdout)>0 else '<empty>'}
stderr: {result.stderr.decode(encoding="utf8", errors="ignore") if len(result.stderr)>0 else '<empty>'}
"""
raise RuntimeError(message)
return result.stdout.decode(encoding="utf8", errors="ignore")
def check_run(command):
result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
return result.returncode == 0
def is_installed(package):
try:
spec = importlib.util.find_spec(package)
except ModuleNotFoundError:
return False
return spec is not None
def repo_dir(name):
return os.path.join(dir_repos, name)
def run_python(code, desc=None, errdesc=None):
return run(f'"{python}" -c "{code}"', desc, errdesc)
def run_pip(args, desc=None):
index_url_line = f' --index-url {index_url}' if index_url != '' else ''
return run(f'"{python}" -m pip {args} --prefer-binary{index_url_line}', desc=f"Installing {desc}", errdesc=f"Couldn't install {desc}")
def check_run_python(code):
return check_run(f'"{python}" -c "{code}"')
def git_clone(url, dir, name, commithash=None):
# TODO clone into temporary dir and move if successful
if os.path.exists(dir):
if commithash is None:
return
current_hash = run(f'"{git}" -C {dir} rev-parse HEAD', None, f"Couldn't determine {name}'s hash: {commithash}").strip()
if current_hash == commithash:
return
run(f'"{git}" -C {dir} fetch', f"Fetching updates for {name}...", f"Couldn't fetch {name}")
run(f'"{git}" -C {dir} checkout {commithash}', f"Checking out commit for {name} with hash: {commithash}...", f"Couldn't checkout commit {commithash} for {name}")
return
run(f'"{git}" clone "{url}" "{dir}"', f"Cloning {name} into {dir}...", f"Couldn't clone {name}")
if commithash is not None:
run(f'"{git}" -C {dir} checkout {commithash}', None, "Couldn't checkout {name}'s hash: {commithash}")
def version_check(commit):
try:
import requests
commits = requests.get('https://api.github.com/repos/AUTOMATIC1111/stable-diffusion-webui/branches/master').json()
if commit != "<none>" and commits['commit']['sha'] != commit:
print("--------------------------------------------------------")
print("| You are not up to date with the most recent release. |")
print("| Consider running `git pull` to update. |")
print("--------------------------------------------------------")
elif commits['commit']['sha'] == commit:
print("You are up to date with the most recent release.")
else:
print("Not a git clone, can't perform version check.")
except Exception as e:
print("version check failed", e)
def run_extensions_installers():
if not os.path.isdir(dir_extensions):
return
for dirname_extension in os.listdir(dir_extensions):
path_installer = os.path.join(dir_extensions, dirname_extension, "install.py")
if not os.path.isfile(path_installer):
continue
try:
env = os.environ.copy()
env['PYTHONPATH'] = os.path.abspath(".")
print(run(f'"{python}" "{path_installer}"', errdesc=f"Error running install.py for extension {dirname_extension}", custom_env=env))
except Exception as e:
print(e, file=sys.stderr)
def prepare_enviroment():
torch_command = os.environ.get('TORCH_COMMAND', "pip install torch==1.12.1+cu113 torchvision==0.13.1+cu113 --extra-index-url https://download.pytorch.org/whl/cu113")
requirements_file = os.environ.get('REQS_FILE', "requirements_versions.txt")
commandline_args = os.environ.get('COMMANDLINE_ARGS', "")
gfpgan_package = os.environ.get('GFPGAN_PACKAGE', "git+https://github.com/TencentARC/GFPGAN.git@8d2447a2d918f8eba5a4a01463fd48e45126a379")
clip_package = os.environ.get('CLIP_PACKAGE', "git+https://github.com/openai/CLIP.git@d50d76daa670286dd6cacf3bcd80b5e4823fc8e1")
deepdanbooru_package = os.environ.get('DEEPDANBOORU_PACKAGE', "git+https://github.com/KichangKim/DeepDanbooru.git@d91a2963bf87c6a770d74894667e9ffa9f6de7ff")
xformers_windows_package = os.environ.get('XFORMERS_WINDOWS_PACKAGE', 'https://github.com/C43H66N12O12S2/stable-diffusion-webui/releases/download/f/xformers-0.0.14.dev0-cp310-cp310-win_amd64.whl')
stable_diffusion_repo = os.environ.get('STABLE_DIFFUSION_REPO', "https://github.com/CompVis/stable-diffusion.git")
taming_transformers_repo = os.environ.get('TAMING_REANSFORMERS_REPO', "https://github.com/CompVis/taming-transformers.git")
k_diffusion_repo = os.environ.get('K_DIFFUSION_REPO', 'https://github.com/crowsonkb/k-diffusion.git')
codeformer_repo = os.environ.get('CODEFORMET_REPO', 'https://github.com/sczhou/CodeFormer.git')
blip_repo = os.environ.get('BLIP_REPO', 'https://github.com/salesforce/BLIP.git')
stable_diffusion_commit_hash = os.environ.get('STABLE_DIFFUSION_COMMIT_HASH', "69ae4b35e0a0f6ee1af8bb9a5d0016ccb27e36dc")
taming_transformers_commit_hash = os.environ.get('TAMING_TRANSFORMERS_COMMIT_HASH', "24268930bf1dce879235a7fddd0b2355b84d7ea6")
k_diffusion_commit_hash = os.environ.get('K_DIFFUSION_COMMIT_HASH', "60e5042ca0da89c14d1dd59d73883280f8fce991")
codeformer_commit_hash = os.environ.get('CODEFORMER_COMMIT_HASH', "c5b4593074ba6214284d6acd5f1719b6c5d739af")
blip_commit_hash = os.environ.get('BLIP_COMMIT_HASH', "48211a1594f1321b00f14c9f7a5b4813144b2fb9")
sys.argv += shlex.split(commandline_args)
test_argv = [x for x in sys.argv if x != '--tests']
sys.argv, skip_torch_cuda_test = extract_arg(sys.argv, '--skip-torch-cuda-test')
sys.argv, reinstall_xformers = extract_arg(sys.argv, '--reinstall-xformers')
sys.argv, update_check = extract_arg(sys.argv, '--update-check')
sys.argv, run_tests = extract_arg(sys.argv, '--tests')
xformers = '--xformers' in sys.argv
deepdanbooru = '--deepdanbooru' in sys.argv
ngrok = '--ngrok' in sys.argv
try:
commit = run(f"{git} rev-parse HEAD").strip()
except Exception:
commit = "<none>"
print(f"Python {sys.version}")
print(f"Commit hash: {commit}")
if not is_installed("torch") or not is_installed("torchvision"):
run(f'"{python}" -m {torch_command}', "Installing torch and torchvision", "Couldn't install torch")
if not skip_torch_cuda_test:
run_python("import torch; assert torch.cuda.is_available(), 'Torch is not able to use GPU; add --skip-torch-cuda-test to COMMANDLINE_ARGS variable to disable this check'")
if not is_installed("gfpgan"):
run_pip(f"install {gfpgan_package}", "gfpgan")
if not is_installed("clip"):
run_pip(f"install {clip_package}", "clip")
if (not is_installed("xformers") or reinstall_xformers) and xformers:
if platform.system() == "Windows":
if platform.python_version().startswith("3.10"):
run_pip(f"install -U -I --no-deps {xformers_windows_package}", "xformers")
else:
print("Installation of xformers is not supported in this version of Python.")
print("You can also check this and build manually: https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Xformers#building-xformers-on-windows-by-duckness")
if not is_installed("xformers"):
exit(0)
elif platform.system() == "Linux":
run_pip("install xformers", "xformers")
if not is_installed("deepdanbooru") and deepdanbooru:
run_pip(f"install {deepdanbooru_package}#egg=deepdanbooru[tensorflow] tensorflow==2.10.0 tensorflow-io==0.27.0", "deepdanbooru")
if not is_installed("pyngrok") and ngrok:
run_pip("install pyngrok", "ngrok")
os.makedirs(dir_repos, exist_ok=True)
git_clone(stable_diffusion_repo, repo_dir('stable-diffusion'), "Stable Diffusion", stable_diffusion_commit_hash)
git_clone(taming_transformers_repo, repo_dir('taming-transformers'), "Taming Transformers", taming_transformers_commit_hash)
git_clone(k_diffusion_repo, repo_dir('k-diffusion'), "K-diffusion", k_diffusion_commit_hash)
git_clone(codeformer_repo, repo_dir('CodeFormer'), "CodeFormer", codeformer_commit_hash)
git_clone(blip_repo, repo_dir('BLIP'), "BLIP", blip_commit_hash)
if not is_installed("lpips"):
run_pip(f"install -r {os.path.join(repo_dir('CodeFormer'), 'requirements.txt')}", "requirements for CodeFormer")
run_pip(f"install -r {requirements_file}", "requirements for Web UI")
run_extensions_installers()
if update_check:
version_check(commit)
if "--exit" in sys.argv:
print("Exiting because of --exit argument")
exit(0)
if run_tests:
tests(test_argv)
exit(0)
def tests(argv):
if "--api" not in argv:
argv.append("--api")
print(f"Launching Web UI in another process for testing with arguments: {' '.join(argv[1:])}")
with open('test/stdout.txt', "w", encoding="utf8") as stdout, open('test/stderr.txt', "w", encoding="utf8") as stderr:
proc = subprocess.Popen([sys.executable, *argv], stdout=stdout, stderr=stderr)
import test.server_poll
test.server_poll.run_tests()
print(f"Stopping Web UI process with id {proc.pid}")
proc.kill()
def start():
print(f"Launching {'API server' if '--nowebui' in sys.argv else 'Web UI'} with arguments: {' '.join(sys.argv[1:])}")
import webui
if '--nowebui' in sys.argv:
webui.api_only()
else:
webui.webui()
if __name__ == "__main__":
prepare_enviroment()
start()
<|code_end|>
modules/sd_samplers.py
<|code_start|>from collections import namedtuple
import numpy as np
from math import floor
import torch
import tqdm
from PIL import Image
import inspect
import k_diffusion.sampling
import ldm.models.diffusion.ddim
import ldm.models.diffusion.plms
from modules import prompt_parser, devices, processing, images
from modules.shared import opts, cmd_opts, state
import modules.shared as shared
from modules.script_callbacks import CFGDenoiserParams, cfg_denoiser_callback
SamplerData = namedtuple('SamplerData', ['name', 'constructor', 'aliases', 'options'])
samplers_k_diffusion = [
('Euler a', 'sample_euler_ancestral', ['k_euler_a'], {}),
('Euler', 'sample_euler', ['k_euler'], {}),
('LMS', 'sample_lms', ['k_lms'], {}),
('Heun', 'sample_heun', ['k_heun'], {}),
('DPM2', 'sample_dpm_2', ['k_dpm_2'], {}),
('DPM2 a', 'sample_dpm_2_ancestral', ['k_dpm_2_a'], {}),
('DPM-Solver++(2S) a', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a'], {}),
('DPM-Solver++(2M)', 'sample_dpmpp_2m', ['k_dpmpp_2m'], {}),
('DPM fast', 'sample_dpm_fast', ['k_dpm_fast'], {}),
('DPM adaptive', 'sample_dpm_adaptive', ['k_dpm_ad'], {}),
('LMS Karras', 'sample_lms', ['k_lms_ka'], {'scheduler': 'karras'}),
('DPM2 Karras', 'sample_dpm_2', ['k_dpm_2_ka'], {'scheduler': 'karras'}),
('DPM2 a Karras', 'sample_dpm_2_ancestral', ['k_dpm_2_a_ka'], {'scheduler': 'karras'}),
('DPM-Solver++(2S) a Karras', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a_ka'], {'scheduler': 'karras'}),
('DPM-Solver++(2M) Karras', 'sample_dpmpp_2m', ['k_dpmpp_2m_ka'], {'scheduler': 'karras'}),
]
samplers_data_k_diffusion = [
SamplerData(label, lambda model, funcname=funcname: KDiffusionSampler(funcname, model), aliases, options)
for label, funcname, aliases, options in samplers_k_diffusion
if hasattr(k_diffusion.sampling, funcname)
]
all_samplers = [
*samplers_data_k_diffusion,
SamplerData('DDIM', lambda model: VanillaStableDiffusionSampler(ldm.models.diffusion.ddim.DDIMSampler, model), [], {}),
SamplerData('PLMS', lambda model: VanillaStableDiffusionSampler(ldm.models.diffusion.plms.PLMSSampler, model), [], {}),
]
samplers = []
samplers_for_img2img = []
def create_sampler_with_index(list_of_configs, index, model):
config = list_of_configs[index]
sampler = config.constructor(model)
sampler.config = config
return sampler
def set_samplers():
global samplers, samplers_for_img2img
hidden = set(opts.hide_samplers)
hidden_img2img = set(opts.hide_samplers + ['PLMS'])
samplers = [x for x in all_samplers if x.name not in hidden]
samplers_for_img2img = [x for x in all_samplers if x.name not in hidden_img2img]
set_samplers()
sampler_extra_params = {
'sample_euler': ['s_churn', 's_tmin', 's_tmax', 's_noise'],
'sample_heun': ['s_churn', 's_tmin', 's_tmax', 's_noise'],
'sample_dpm_2': ['s_churn', 's_tmin', 's_tmax', 's_noise'],
}
def setup_img2img_steps(p, steps=None):
if opts.img2img_fix_steps or steps is not None:
steps = int((steps or p.steps) / min(p.denoising_strength, 0.999)) if p.denoising_strength > 0 else 0
t_enc = p.steps - 1
else:
steps = p.steps
t_enc = int(min(p.denoising_strength, 0.999) * steps)
return steps, t_enc
def single_sample_to_image(sample):
x_sample = processing.decode_first_stage(shared.sd_model, sample.unsqueeze(0))[0]
x_sample = torch.clamp((x_sample + 1.0) / 2.0, min=0.0, max=1.0)
x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2)
x_sample = x_sample.astype(np.uint8)
return Image.fromarray(x_sample)
def sample_to_image(samples, index=0):
return single_sample_to_image(samples[index])
def samples_to_image_grid(samples):
return images.image_grid([single_sample_to_image(sample) for sample in samples])
def store_latent(decoded):
state.current_latent = decoded
if opts.show_progress_every_n_steps > 0 and shared.state.sampling_step % opts.show_progress_every_n_steps == 0:
if not shared.parallel_processing_allowed:
shared.state.current_image = sample_to_image(decoded)
class InterruptedException(BaseException):
pass
class VanillaStableDiffusionSampler:
def __init__(self, constructor, sd_model):
self.sampler = constructor(sd_model)
self.orig_p_sample_ddim = self.sampler.p_sample_ddim if hasattr(self.sampler, 'p_sample_ddim') else self.sampler.p_sample_plms
self.mask = None
self.nmask = None
self.init_latent = None
self.sampler_noises = None
self.step = 0
self.stop_at = None
self.eta = None
self.default_eta = 0.0
self.config = None
self.last_latent = None
self.conditioning_key = sd_model.model.conditioning_key
def number_of_needed_noises(self, p):
return 0
def launch_sampling(self, steps, func):
state.sampling_steps = steps
state.sampling_step = 0
try:
return func()
except InterruptedException:
return self.last_latent
def p_sample_ddim_hook(self, x_dec, cond, ts, unconditional_conditioning, *args, **kwargs):
if state.interrupted or state.skipped:
raise InterruptedException
if self.stop_at is not None and self.step > self.stop_at:
raise InterruptedException
# Have to unwrap the inpainting conditioning here to perform pre-processing
image_conditioning = None
if isinstance(cond, dict):
image_conditioning = cond["c_concat"][0]
cond = cond["c_crossattn"][0]
unconditional_conditioning = unconditional_conditioning["c_crossattn"][0]
conds_list, tensor = prompt_parser.reconstruct_multicond_batch(cond, self.step)
unconditional_conditioning = prompt_parser.reconstruct_cond_batch(unconditional_conditioning, self.step)
assert all([len(conds) == 1 for conds in conds_list]), 'composition via AND is not supported for DDIM/PLMS samplers'
cond = tensor
# for DDIM, shapes must match, we can't just process cond and uncond independently;
# filling unconditional_conditioning with repeats of the last vector to match length is
# not 100% correct but should work well enough
if unconditional_conditioning.shape[1] < cond.shape[1]:
last_vector = unconditional_conditioning[:, -1:]
last_vector_repeated = last_vector.repeat([1, cond.shape[1] - unconditional_conditioning.shape[1], 1])
unconditional_conditioning = torch.hstack([unconditional_conditioning, last_vector_repeated])
elif unconditional_conditioning.shape[1] > cond.shape[1]:
unconditional_conditioning = unconditional_conditioning[:, :cond.shape[1]]
if self.mask is not None:
img_orig = self.sampler.model.q_sample(self.init_latent, ts)
x_dec = img_orig * self.mask + self.nmask * x_dec
# Wrap the image conditioning back up since the DDIM code can accept the dict directly.
# Note that they need to be lists because it just concatenates them later.
if image_conditioning is not None:
cond = {"c_concat": [image_conditioning], "c_crossattn": [cond]}
unconditional_conditioning = {"c_concat": [image_conditioning], "c_crossattn": [unconditional_conditioning]}
res = self.orig_p_sample_ddim(x_dec, cond, ts, unconditional_conditioning=unconditional_conditioning, *args, **kwargs)
if self.mask is not None:
self.last_latent = self.init_latent * self.mask + self.nmask * res[1]
else:
self.last_latent = res[1]
store_latent(self.last_latent)
self.step += 1
state.sampling_step = self.step
shared.total_tqdm.update()
return res
def initialize(self, p):
self.eta = p.eta if p.eta is not None else opts.eta_ddim
for fieldname in ['p_sample_ddim', 'p_sample_plms']:
if hasattr(self.sampler, fieldname):
setattr(self.sampler, fieldname, self.p_sample_ddim_hook)
self.mask = p.mask if hasattr(p, 'mask') else None
self.nmask = p.nmask if hasattr(p, 'nmask') else None
def adjust_steps_if_invalid(self, p, num_steps):
if (self.config.name == 'DDIM' and p.ddim_discretize == 'uniform') or (self.config.name == 'PLMS'):
valid_step = 999 / (1000 // num_steps)
if valid_step == floor(valid_step):
return int(valid_step) + 1
return num_steps
def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning, steps=None, image_conditioning=None):
steps, t_enc = setup_img2img_steps(p, steps)
steps = self.adjust_steps_if_invalid(p, steps)
self.initialize(p)
self.sampler.make_schedule(ddim_num_steps=steps, ddim_eta=self.eta, ddim_discretize=p.ddim_discretize, verbose=False)
x1 = self.sampler.stochastic_encode(x, torch.tensor([t_enc] * int(x.shape[0])).to(shared.device), noise=noise)
self.init_latent = x
self.last_latent = x
self.step = 0
# Wrap the conditioning models with additional image conditioning for inpainting model
if image_conditioning is not None:
conditioning = {"c_concat": [image_conditioning], "c_crossattn": [conditioning]}
unconditional_conditioning = {"c_concat": [image_conditioning], "c_crossattn": [unconditional_conditioning]}
samples = self.launch_sampling(t_enc + 1, lambda: self.sampler.decode(x1, conditioning, t_enc, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning))
return samples
def sample(self, p, x, conditioning, unconditional_conditioning, steps=None, image_conditioning=None):
self.initialize(p)
self.init_latent = None
self.last_latent = x
self.step = 0
steps = self.adjust_steps_if_invalid(p, steps or p.steps)
# Wrap the conditioning models with additional image conditioning for inpainting model
if image_conditioning is not None:
conditioning = {"c_concat": [image_conditioning], "c_crossattn": [conditioning]}
unconditional_conditioning = {"c_concat": [image_conditioning], "c_crossattn": [unconditional_conditioning]}
samples_ddim = self.launch_sampling(steps, lambda: self.sampler.sample(S=steps, conditioning=conditioning, batch_size=int(x.shape[0]), shape=x[0].shape, verbose=False, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning, x_T=x, eta=self.eta)[0])
return samples_ddim
class CFGDenoiser(torch.nn.Module):
def __init__(self, model):
super().__init__()
self.inner_model = model
self.mask = None
self.nmask = None
self.init_latent = None
self.step = 0
def forward(self, x, sigma, uncond, cond, cond_scale, image_cond):
if state.interrupted or state.skipped:
raise InterruptedException
conds_list, tensor = prompt_parser.reconstruct_multicond_batch(cond, self.step)
uncond = prompt_parser.reconstruct_cond_batch(uncond, self.step)
batch_size = len(conds_list)
repeats = [len(conds_list[i]) for i in range(batch_size)]
x_in = torch.cat([torch.stack([x[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [x])
image_cond_in = torch.cat([torch.stack([image_cond[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [image_cond])
sigma_in = torch.cat([torch.stack([sigma[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [sigma])
denoiser_params = CFGDenoiserParams(x_in, image_cond_in, sigma_in, state.sampling_step, state.sampling_steps)
cfg_denoiser_callback(denoiser_params)
x_in = denoiser_params.x
image_cond_in = denoiser_params.image_cond
sigma_in = denoiser_params.sigma
if tensor.shape[1] == uncond.shape[1]:
cond_in = torch.cat([tensor, uncond])
if shared.batch_cond_uncond:
x_out = self.inner_model(x_in, sigma_in, cond={"c_crossattn": [cond_in], "c_concat": [image_cond_in]})
else:
x_out = torch.zeros_like(x_in)
for batch_offset in range(0, x_out.shape[0], batch_size):
a = batch_offset
b = a + batch_size
x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond={"c_crossattn": [cond_in[a:b]], "c_concat": [image_cond_in[a:b]]})
else:
x_out = torch.zeros_like(x_in)
batch_size = batch_size*2 if shared.batch_cond_uncond else batch_size
for batch_offset in range(0, tensor.shape[0], batch_size):
a = batch_offset
b = min(a + batch_size, tensor.shape[0])
x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond={"c_crossattn": [tensor[a:b]], "c_concat": [image_cond_in[a:b]]})
x_out[-uncond.shape[0]:] = self.inner_model(x_in[-uncond.shape[0]:], sigma_in[-uncond.shape[0]:], cond={"c_crossattn": [uncond], "c_concat": [image_cond_in[-uncond.shape[0]:]]})
denoised_uncond = x_out[-uncond.shape[0]:]
denoised = torch.clone(denoised_uncond)
for i, conds in enumerate(conds_list):
for cond_index, weight in conds:
denoised[i] += (x_out[cond_index] - denoised_uncond[i]) * (weight * cond_scale)
if self.mask is not None:
denoised = self.init_latent * self.mask + self.nmask * denoised
self.step += 1
return denoised
class TorchHijack:
def __init__(self, kdiff_sampler):
self.kdiff_sampler = kdiff_sampler
def __getattr__(self, item):
if item == 'randn_like':
return self.kdiff_sampler.randn_like
if hasattr(torch, item):
return getattr(torch, item)
raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, item))
class KDiffusionSampler:
def __init__(self, funcname, sd_model):
self.model_wrap = k_diffusion.external.CompVisDenoiser(sd_model, quantize=shared.opts.enable_quantization)
self.funcname = funcname
self.func = getattr(k_diffusion.sampling, self.funcname)
self.extra_params = sampler_extra_params.get(funcname, [])
self.model_wrap_cfg = CFGDenoiser(self.model_wrap)
self.sampler_noises = None
self.sampler_noise_index = 0
self.stop_at = None
self.eta = None
self.default_eta = 1.0
self.config = None
self.last_latent = None
self.conditioning_key = sd_model.model.conditioning_key
def callback_state(self, d):
step = d['i']
latent = d["denoised"]
store_latent(latent)
self.last_latent = latent
if self.stop_at is not None and step > self.stop_at:
raise InterruptedException
state.sampling_step = step
shared.total_tqdm.update()
def launch_sampling(self, steps, func):
state.sampling_steps = steps
state.sampling_step = 0
try:
return func()
except InterruptedException:
return self.last_latent
def number_of_needed_noises(self, p):
return p.steps
def randn_like(self, x):
noise = self.sampler_noises[self.sampler_noise_index] if self.sampler_noises is not None and self.sampler_noise_index < len(self.sampler_noises) else None
if noise is not None and x.shape == noise.shape:
res = noise
else:
res = torch.randn_like(x)
self.sampler_noise_index += 1
return res
def initialize(self, p):
self.model_wrap_cfg.mask = p.mask if hasattr(p, 'mask') else None
self.model_wrap_cfg.nmask = p.nmask if hasattr(p, 'nmask') else None
self.model_wrap.step = 0
self.sampler_noise_index = 0
self.eta = p.eta or opts.eta_ancestral
if self.sampler_noises is not None:
k_diffusion.sampling.torch = TorchHijack(self)
extra_params_kwargs = {}
for param_name in self.extra_params:
if hasattr(p, param_name) and param_name in inspect.signature(self.func).parameters:
extra_params_kwargs[param_name] = getattr(p, param_name)
if 'eta' in inspect.signature(self.func).parameters:
extra_params_kwargs['eta'] = self.eta
return extra_params_kwargs
def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning, steps=None, image_conditioning=None):
steps, t_enc = setup_img2img_steps(p, steps)
if p.sampler_noise_scheduler_override:
sigmas = p.sampler_noise_scheduler_override(steps)
elif self.config is not None and self.config.options.get('scheduler', None) == 'karras':
sigmas = k_diffusion.sampling.get_sigmas_karras(n=steps, sigma_min=0.1, sigma_max=10, device=shared.device)
else:
sigmas = self.model_wrap.get_sigmas(steps)
sigma_sched = sigmas[steps - t_enc - 1:]
xi = x + noise * sigma_sched[0]
extra_params_kwargs = self.initialize(p)
if 'sigma_min' in inspect.signature(self.func).parameters:
## last sigma is zero which isn't allowed by DPM Fast & Adaptive so taking value before last
extra_params_kwargs['sigma_min'] = sigma_sched[-2]
if 'sigma_max' in inspect.signature(self.func).parameters:
extra_params_kwargs['sigma_max'] = sigma_sched[0]
if 'n' in inspect.signature(self.func).parameters:
extra_params_kwargs['n'] = len(sigma_sched) - 1
if 'sigma_sched' in inspect.signature(self.func).parameters:
extra_params_kwargs['sigma_sched'] = sigma_sched
if 'sigmas' in inspect.signature(self.func).parameters:
extra_params_kwargs['sigmas'] = sigma_sched
self.model_wrap_cfg.init_latent = x
self.last_latent = x
samples = self.launch_sampling(t_enc + 1, lambda: self.func(self.model_wrap_cfg, xi, extra_args={
'cond': conditioning,
'image_cond': image_conditioning,
'uncond': unconditional_conditioning,
'cond_scale': p.cfg_scale
}, disable=False, callback=self.callback_state, **extra_params_kwargs))
return samples
def sample(self, p, x, conditioning, unconditional_conditioning, steps=None, image_conditioning = None):
steps = steps or p.steps
if p.sampler_noise_scheduler_override:
sigmas = p.sampler_noise_scheduler_override(steps)
elif self.config is not None and self.config.options.get('scheduler', None) == 'karras':
sigmas = k_diffusion.sampling.get_sigmas_karras(n=steps, sigma_min=0.1, sigma_max=10, device=shared.device)
else:
sigmas = self.model_wrap.get_sigmas(steps)
x = x * sigmas[0]
extra_params_kwargs = self.initialize(p)
if 'sigma_min' in inspect.signature(self.func).parameters:
extra_params_kwargs['sigma_min'] = self.model_wrap.sigmas[0].item()
extra_params_kwargs['sigma_max'] = self.model_wrap.sigmas[-1].item()
if 'n' in inspect.signature(self.func).parameters:
extra_params_kwargs['n'] = steps
else:
extra_params_kwargs['sigmas'] = sigmas
self.last_latent = x
samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args={
'cond': conditioning,
'image_cond': image_conditioning,
'uncond': unconditional_conditioning,
'cond_scale': p.cfg_scale
}, disable=False, callback=self.callback_state, **extra_params_kwargs))
return samples
<|code_end|>
|
HTTPS (SSL) support
**Is your feature request related to a problem? Please describe.**
The ability to start the web UI in HTTPS (SSL-encrypted) mode, specifying the certificate, key and server name on the CLI.
**Describe the solution you'd like**
Gradio already implements this; just pass the ssl_keyfile and ssl_certfile parameters to launch():
https://github.com/gradio-app/gradio/issues/563
It is also recommended (possibly required) to use a real hostname instead of 0.0.0.0, so --listen should take a parameter to specify the name/IP address.
I've modified your webui.py to test this and it works fine:
demo.launch(
share=cmd_opts.share,
server_name="xxx.yyy.hu",
server_port=cmd_opts.port,
debug=cmd_opts.gradio_debug,
auth=[tuple(cred.split(':')) for cred in cmd_opts.gradio_auth.strip('"').split(',')] if cmd_opts.gradio_auth else None,
inbrowser=cmd_opts.autolaunch,
ssl_keyfile="/etc/ssl/xxx.yyy.key",
ssl_certfile="/etc/ssl/xxx.yyy.pem"
)
**Describe alternatives you've considered**
First I set up an Apache reverse proxy (ProxyPass to http://127.0.0.1:7860), but it times out after a few minutes, so if the render takes several minutes you never receive it and the whole UI becomes unresponsive:
[Fri Sep 23 21:03:51.138645 2022] [proxy_http:error] [pid 3946:tid 139649922103040] (70007)The timeout specified has expired: [client 37.191.45.201:57969] AH01102: error reading status line from remote server 127.0.0.1:7860
[Fri Sep 23 21:03:51.138669 2022] [proxy:error] [pid 3946:tid 139649922103040] [client 37.191.45.201:57969] AH00898: Error reading from remote server returned by /api/predict/
**Additional context**
HTTPS is REQUIRED for browser notifications to work, because (at least in Firefox) they are not allowed on plain http URLs.
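Once HTTPS is enabled, a quick sanity check from Python (the hostname and port below are placeholders) confirms that the certificate is actually being served:
import ssl
import urllib.request

ctx = ssl.create_default_context()  # validates the server certificate against the system CA store
with urllib.request.urlopen("https://xxx.yyy.hu:7860/", context=ctx, timeout=10) as resp:
    print(resp.status)  # 200 means both TLS and the UI are up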
|
modules/shared.py
<|code_start|>import argparse
import datetime
import json
import os
import sys
from collections import OrderedDict
import time
import gradio as gr
import tqdm
import modules.artists
import modules.interrogate
import modules.memmon
import modules.sd_models
import modules.styles
import modules.devices as devices
from modules import sd_samplers, sd_models, localization, sd_vae
from modules.hypernetworks import hypernetwork
from modules.paths import models_path, script_path, sd_path
sd_model_file = os.path.join(script_path, 'model.ckpt')
default_sd_model_file = sd_model_file
parser = argparse.ArgumentParser()
parser.add_argument("--config", type=str, default=os.path.join(sd_path, "configs/stable-diffusion/v1-inference.yaml"), help="path to config which constructs model",)
parser.add_argument("--ckpt", type=str, default=sd_model_file, help="path to checkpoint of stable diffusion model; if specified, this checkpoint will be added to the list of checkpoints and loaded",)
parser.add_argument("--ckpt-dir", type=str, default=None, help="Path to directory with stable diffusion checkpoints")
parser.add_argument("--gfpgan-dir", type=str, help="GFPGAN directory", default=('./src/gfpgan' if os.path.exists('./src/gfpgan') else './GFPGAN'))
parser.add_argument("--gfpgan-model", type=str, help="GFPGAN model file name", default=None)
parser.add_argument("--no-half", action='store_true', help="do not switch the model to 16-bit floats")
parser.add_argument("--no-half-vae", action='store_true', help="do not switch the VAE model to 16-bit floats")
parser.add_argument("--no-progressbar-hiding", action='store_true', help="do not hide progressbar in gradio UI (we hide it because it slows down ML if you have hardware acceleration in browser)")
parser.add_argument("--max-batch-count", type=int, default=16, help="maximum batch count value for the UI")
parser.add_argument("--embeddings-dir", type=str, default=os.path.join(script_path, 'embeddings'), help="embeddings directory for textual inversion (default: embeddings)")
parser.add_argument("--hypernetwork-dir", type=str, default=os.path.join(models_path, 'hypernetworks'), help="hypernetwork directory")
parser.add_argument("--localizations-dir", type=str, default=os.path.join(script_path, 'localizations'), help="localizations directory")
parser.add_argument("--allow-code", action='store_true', help="allow custom script execution from webui")
parser.add_argument("--medvram", action='store_true', help="enable stable diffusion model optimizations for sacrificing a little speed for low VRM usage")
parser.add_argument("--lowvram", action='store_true', help="enable stable diffusion model optimizations for sacrificing a lot of speed for very low VRM usage")
parser.add_argument("--lowram", action='store_true', help="load stable diffusion checkpoint weights to VRAM instead of RAM")
parser.add_argument("--always-batch-cond-uncond", action='store_true', help="disables cond/uncond batching that is enabled to save memory with --medvram or --lowvram")
parser.add_argument("--unload-gfpgan", action='store_true', help="does not do anything.")
parser.add_argument("--precision", type=str, help="evaluate at this precision", choices=["full", "autocast"], default="autocast")
parser.add_argument("--share", action='store_true', help="use share=True for gradio and make the UI accessible through their site")
parser.add_argument("--ngrok", type=str, help="ngrok authtoken, alternative to gradio --share", default=None)
parser.add_argument("--ngrok-region", type=str, help="The region in which ngrok should start.", default="us")
parser.add_argument("--enable-insecure-extension-access", action='store_true', help="enable extensions tab regardless of other options")
parser.add_argument("--codeformer-models-path", type=str, help="Path to directory with codeformer model file(s).", default=os.path.join(models_path, 'Codeformer'))
parser.add_argument("--gfpgan-models-path", type=str, help="Path to directory with GFPGAN model file(s).", default=os.path.join(models_path, 'GFPGAN'))
parser.add_argument("--esrgan-models-path", type=str, help="Path to directory with ESRGAN model file(s).", default=os.path.join(models_path, 'ESRGAN'))
parser.add_argument("--bsrgan-models-path", type=str, help="Path to directory with BSRGAN model file(s).", default=os.path.join(models_path, 'BSRGAN'))
parser.add_argument("--realesrgan-models-path", type=str, help="Path to directory with RealESRGAN model file(s).", default=os.path.join(models_path, 'RealESRGAN'))
parser.add_argument("--scunet-models-path", type=str, help="Path to directory with ScuNET model file(s).", default=os.path.join(models_path, 'ScuNET'))
parser.add_argument("--swinir-models-path", type=str, help="Path to directory with SwinIR model file(s).", default=os.path.join(models_path, 'SwinIR'))
parser.add_argument("--ldsr-models-path", type=str, help="Path to directory with LDSR model file(s).", default=os.path.join(models_path, 'LDSR'))
parser.add_argument("--clip-models-path", type=str, help="Path to directory with CLIP model file(s).", default=None)
parser.add_argument("--xformers", action='store_true', help="enable xformers for cross attention layers")
parser.add_argument("--force-enable-xformers", action='store_true', help="enable xformers for cross attention layers regardless of whether the checking code thinks you can run it; do not make bug reports if this fails to work")
parser.add_argument("--deepdanbooru", action='store_true', help="enable deepdanbooru interrogator")
parser.add_argument("--opt-split-attention", action='store_true', help="force-enables Doggettx's cross-attention layer optimization. By default, it's on for torch cuda.")
parser.add_argument("--opt-split-attention-invokeai", action='store_true', help="force-enables InvokeAI's cross-attention layer optimization. By default, it's on when cuda is unavailable.")
parser.add_argument("--opt-split-attention-v1", action='store_true', help="enable older version of split attention optimization that does not consume all the VRAM it can find")
parser.add_argument("--disable-opt-split-attention", action='store_true', help="force-disables cross-attention layer optimization")
parser.add_argument("--use-cpu", nargs='+',choices=['all', 'sd', 'interrogate', 'gfpgan', 'swinir', 'esrgan', 'scunet', 'codeformer'], help="use CPU as torch device for specified modules", default=[], type=str.lower)
parser.add_argument("--listen", action='store_true', help="launch gradio with 0.0.0.0 as server name, allowing to respond to network requests")
parser.add_argument("--port", type=int, help="launch gradio with given server port, you need root/admin rights for ports < 1024, defaults to 7860 if available", default=None)
parser.add_argument("--show-negative-prompt", action='store_true', help="does not do anything", default=False)
parser.add_argument("--ui-config-file", type=str, help="filename to use for ui configuration", default=os.path.join(script_path, 'ui-config.json'))
parser.add_argument("--hide-ui-dir-config", action='store_true', help="hide directory configuration from webui", default=False)
parser.add_argument("--freeze-settings", action='store_true', help="disable editing settings", default=False)
parser.add_argument("--ui-settings-file", type=str, help="filename to use for ui settings", default=os.path.join(script_path, 'config.json'))
parser.add_argument("--gradio-debug", action='store_true', help="launch gradio with --debug option")
parser.add_argument("--gradio-auth", type=str, help='set gradio authentication like "username:password"; or comma-delimit multiple like "u1:p1,u2:p2,u3:p3"', default=None)
parser.add_argument("--gradio-img2img-tool", type=str, help='gradio image uploader tool: can be either editor for ctopping, or color-sketch for drawing', choices=["color-sketch", "editor"], default="editor")
parser.add_argument("--opt-channelslast", action='store_true', help="change memory type for stable diffusion to channels last")
parser.add_argument("--styles-file", type=str, help="filename to use for styles", default=os.path.join(script_path, 'styles.csv'))
parser.add_argument("--autolaunch", action='store_true', help="open the webui URL in the system's default browser upon launch", default=False)
parser.add_argument("--theme", type=str, help="launches the UI with light or dark theme", default=None)
parser.add_argument("--use-textbox-seed", action='store_true', help="use textbox for seeds in UI (no up/down, but possible to input long seeds)", default=False)
parser.add_argument("--disable-console-progressbars", action='store_true', help="do not output progressbars to console", default=False)
parser.add_argument("--enable-console-prompts", action='store_true', help="print prompts to console when generating with txt2img and img2img", default=False)
parser.add_argument('--vae-path', type=str, help='Path to Variational Autoencoders model', default=None)
parser.add_argument("--disable-safe-unpickle", action='store_true', help="disable checking pytorch models for malicious code", default=False)
parser.add_argument("--api", action='store_true', help="use api=True to launch the api with the webui")
parser.add_argument("--nowebui", action='store_true', help="use api=True to launch the api instead of the webui")
parser.add_argument("--ui-debug-mode", action='store_true', help="Don't load model to quickly launch UI")
parser.add_argument("--device-id", type=str, help="Select the default CUDA device to use (export CUDA_VISIBLE_DEVICES=0,1,etc might be needed before)", default=None)
parser.add_argument("--administrator", action='store_true', help="Administrator rights", default=False)
cmd_opts = parser.parse_args()
restricted_opts = {
"samples_filename_pattern",
"directories_filename_pattern",
"outdir_samples",
"outdir_txt2img_samples",
"outdir_img2img_samples",
"outdir_extras_samples",
"outdir_grids",
"outdir_txt2img_grids",
"outdir_save",
}
cmd_opts.disable_extension_access = (cmd_opts.share or cmd_opts.listen) and not cmd_opts.enable_insecure_extension_access
devices.device, devices.device_interrogate, devices.device_gfpgan, devices.device_swinir, devices.device_esrgan, devices.device_scunet, devices.device_codeformer = \
(devices.cpu if any(y in cmd_opts.use_cpu for y in [x, 'all']) else devices.get_optimal_device() for x in ['sd', 'interrogate', 'gfpgan', 'swinir', 'esrgan', 'scunet', 'codeformer'])
device = devices.device
weight_load_location = None if cmd_opts.lowram else "cpu"
batch_cond_uncond = cmd_opts.always_batch_cond_uncond or not (cmd_opts.lowvram or cmd_opts.medvram)
parallel_processing_allowed = not cmd_opts.lowvram and not cmd_opts.medvram
xformers_available = False
config_filename = cmd_opts.ui_settings_file
os.makedirs(cmd_opts.hypernetwork_dir, exist_ok=True)
hypernetworks = hypernetwork.list_hypernetworks(cmd_opts.hypernetwork_dir)
loaded_hypernetwork = None
def reload_hypernetworks():
global hypernetworks
hypernetworks = hypernetwork.list_hypernetworks(cmd_opts.hypernetwork_dir)
hypernetwork.load_hypernetwork(opts.sd_hypernetwork)
class State:
skipped = False
interrupted = False
job = ""
job_no = 0
job_count = 0
job_timestamp = '0'
sampling_step = 0
sampling_steps = 0
current_latent = None
current_image = None
current_image_sampling_step = 0
textinfo = None
time_start = None
need_restart = False
def skip(self):
self.skipped = True
def interrupt(self):
self.interrupted = True
def nextjob(self):
if opts.show_progress_every_n_steps == -1:
self.do_set_current_image()
self.job_no += 1
self.sampling_step = 0
self.current_image_sampling_step = 0
def dict(self):
obj = {
"skipped": self.skipped,
"interrupted": self.skipped,
"job": self.job,
"job_count": self.job_count,
"job_no": self.job_no,
"sampling_step": self.sampling_step,
"sampling_steps": self.sampling_steps,
}
return obj
def begin(self):
self.sampling_step = 0
self.job_count = -1
self.job_no = 0
self.job_timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
self.current_latent = None
self.current_image = None
self.current_image_sampling_step = 0
self.skipped = False
self.interrupted = False
self.textinfo = None
self.time_start = time.time()
devices.torch_gc()
def end(self):
self.job = ""
self.job_count = 0
devices.torch_gc()
"""sets self.current_image from self.current_latent if enough sampling steps have been made after the last call to this"""
def set_current_image(self):
if self.sampling_step - self.current_image_sampling_step >= opts.show_progress_every_n_steps and opts.show_progress_every_n_steps > 0:
self.do_set_current_image()
def do_set_current_image(self):
if not parallel_processing_allowed:
return
if self.current_latent is None:
return
if opts.show_progress_grid:
self.current_image = sd_samplers.samples_to_image_grid(self.current_latent)
else:
self.current_image = sd_samplers.sample_to_image(self.current_latent)
self.current_image_sampling_step = self.sampling_step
state = State()
artist_db = modules.artists.ArtistsDatabase(os.path.join(script_path, 'artists.csv'))
styles_filename = cmd_opts.styles_file
prompt_styles = modules.styles.StyleDatabase(styles_filename)
interrogator = modules.interrogate.InterrogateModels("interrogate")
face_restorers = []
localization.list_localizations(cmd_opts.localizations_dir)
def realesrgan_models_names():
import modules.realesrgan_model
return [x.name for x in modules.realesrgan_model.get_realesrgan_models(None)]
class OptionInfo:
def __init__(self, default=None, label="", component=None, component_args=None, onchange=None, section=None, refresh=None):
self.default = default
self.label = label
self.component = component
self.component_args = component_args
self.onchange = onchange
self.section = section
self.refresh = refresh
def options_section(section_identifier, options_dict):
for k, v in options_dict.items():
v.section = section_identifier
return options_dict
hide_dirs = {"visible": not cmd_opts.hide_ui_dir_config}
options_templates = {}
options_templates.update(options_section(('saving-images', "Saving images/grids"), {
"samples_save": OptionInfo(True, "Always save all generated images"),
"samples_format": OptionInfo('png', 'File format for images'),
"samples_filename_pattern": OptionInfo("", "Images filename pattern", component_args=hide_dirs),
"save_images_add_number": OptionInfo(True, "Add number to filename when saving", component_args=hide_dirs),
"grid_save": OptionInfo(True, "Always save all generated image grids"),
"grid_format": OptionInfo('png', 'File format for grids'),
"grid_extended_filename": OptionInfo(False, "Add extended info (seed, prompt) to filename when saving grid"),
"grid_only_if_multiple": OptionInfo(True, "Do not save grids consisting of one picture"),
"grid_prevent_empty_spots": OptionInfo(False, "Prevent empty spots in grid (when set to autodetect)"),
"n_rows": OptionInfo(-1, "Grid row count; use -1 for autodetect and 0 for it to be same as batch size", gr.Slider, {"minimum": -1, "maximum": 16, "step": 1}),
"enable_pnginfo": OptionInfo(True, "Save text information about generation parameters as chunks to png files"),
"save_txt": OptionInfo(False, "Create a text file next to every image with generation parameters."),
"save_images_before_face_restoration": OptionInfo(False, "Save a copy of image before doing face restoration."),
"save_images_before_highres_fix": OptionInfo(False, "Save a copy of image before applying highres fix."),
"save_images_before_color_correction": OptionInfo(False, "Save a copy of image before applying color correction to img2img results"),
"jpeg_quality": OptionInfo(80, "Quality for saved jpeg images", gr.Slider, {"minimum": 1, "maximum": 100, "step": 1}),
"export_for_4chan": OptionInfo(True, "If PNG image is larger than 4MB or any dimension is larger than 4000, downscale and save copy as JPG"),
"use_original_name_batch": OptionInfo(False, "Use original name for output filename during batch process in extras tab"),
"save_selected_only": OptionInfo(True, "When using 'Save' button, only save a single selected image"),
"do_not_add_watermark": OptionInfo(False, "Do not add watermark to images"),
}))
options_templates.update(options_section(('saving-paths', "Paths for saving"), {
"outdir_samples": OptionInfo("", "Output directory for images; if empty, defaults to three directories below", component_args=hide_dirs),
"outdir_txt2img_samples": OptionInfo("outputs/txt2img-images", 'Output directory for txt2img images', component_args=hide_dirs),
"outdir_img2img_samples": OptionInfo("outputs/img2img-images", 'Output directory for img2img images', component_args=hide_dirs),
"outdir_extras_samples": OptionInfo("outputs/extras-images", 'Output directory for images from extras tab', component_args=hide_dirs),
"outdir_grids": OptionInfo("", "Output directory for grids; if empty, defaults to two directories below", component_args=hide_dirs),
"outdir_txt2img_grids": OptionInfo("outputs/txt2img-grids", 'Output directory for txt2img grids', component_args=hide_dirs),
"outdir_img2img_grids": OptionInfo("outputs/img2img-grids", 'Output directory for img2img grids', component_args=hide_dirs),
"outdir_save": OptionInfo("log/images", "Directory for saving images using the Save button", component_args=hide_dirs),
}))
options_templates.update(options_section(('saving-to-dirs', "Saving to a directory"), {
"save_to_dirs": OptionInfo(False, "Save images to a subdirectory"),
"grid_save_to_dirs": OptionInfo(False, "Save grids to a subdirectory"),
"use_save_to_dirs_for_ui": OptionInfo(False, "When using \"Save\" button, save images to a subdirectory"),
"directories_filename_pattern": OptionInfo("", "Directory name pattern", component_args=hide_dirs),
"directories_max_prompt_words": OptionInfo(8, "Max prompt words for [prompt_words] pattern", gr.Slider, {"minimum": 1, "maximum": 20, "step": 1, **hide_dirs}),
}))
options_templates.update(options_section(('upscaling', "Upscaling"), {
"ESRGAN_tile": OptionInfo(192, "Tile size for ESRGAN upscalers. 0 = no tiling.", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}),
"ESRGAN_tile_overlap": OptionInfo(8, "Tile overlap, in pixels for ESRGAN upscalers. Low values = visible seam.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}),
"realesrgan_enabled_models": OptionInfo(["R-ESRGAN x4+", "R-ESRGAN x4+ Anime6B"], "Select which Real-ESRGAN models to show in the web UI. (Requires restart)", gr.CheckboxGroup, lambda: {"choices": realesrgan_models_names()}),
"SWIN_tile": OptionInfo(192, "Tile size for all SwinIR.", gr.Slider, {"minimum": 16, "maximum": 512, "step": 16}),
"SWIN_tile_overlap": OptionInfo(8, "Tile overlap, in pixels for SwinIR. Low values = visible seam.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}),
"ldsr_steps": OptionInfo(100, "LDSR processing steps. Lower = faster", gr.Slider, {"minimum": 1, "maximum": 200, "step": 1}),
"upscaler_for_img2img": OptionInfo(None, "Upscaler for img2img", gr.Dropdown, lambda: {"choices": [x.name for x in sd_upscalers]}),
"use_scale_latent_for_hires_fix": OptionInfo(False, "Upscale latent space image when doing hires. fix"),
}))
options_templates.update(options_section(('face-restoration', "Face restoration"), {
"face_restoration_model": OptionInfo(None, "Face restoration model", gr.Radio, lambda: {"choices": [x.name() for x in face_restorers]}),
"code_former_weight": OptionInfo(0.5, "CodeFormer weight parameter; 0 = maximum effect; 1 = minimum effect", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}),
"face_restoration_unload": OptionInfo(False, "Move face restoration model from VRAM into RAM after processing"),
}))
options_templates.update(options_section(('system', "System"), {
"memmon_poll_rate": OptionInfo(8, "VRAM usage polls per second during generation. Set to 0 to disable.", gr.Slider, {"minimum": 0, "maximum": 40, "step": 1}),
"samples_log_stdout": OptionInfo(False, "Always print all generation info to standard output"),
"multiple_tqdm": OptionInfo(True, "Add a second progress bar to the console that shows progress for an entire job."),
}))
options_templates.update(options_section(('training', "Training"), {
"unload_models_when_training": OptionInfo(False, "Move VAE and CLIP to RAM when training if possible. Saves VRAM."),
"dataset_filename_word_regex": OptionInfo("", "Filename word regex"),
"dataset_filename_join_string": OptionInfo(" ", "Filename join string"),
"training_image_repeats_per_epoch": OptionInfo(1, "Number of repeats for a single input image per epoch; used only for displaying epoch number", gr.Number, {"precision": 0}),
"training_write_csv_every": OptionInfo(500, "Save an csv containing the loss to log directory every N steps, 0 to disable"),
"training_xattention_optimizations": OptionInfo(False, "Use cross attention optimizations while training"),
}))
options_templates.update(options_section(('sd', "Stable Diffusion"), {
"sd_model_checkpoint": OptionInfo(None, "Stable Diffusion checkpoint", gr.Dropdown, lambda: {"choices": modules.sd_models.checkpoint_tiles()}, refresh=sd_models.list_models),
"sd_checkpoint_cache": OptionInfo(0, "Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}),
"sd_vae": OptionInfo("auto", "SD VAE", gr.Dropdown, lambda: {"choices": list(sd_vae.vae_list)}, refresh=sd_vae.refresh_vae_list),
"sd_hypernetwork": OptionInfo("None", "Hypernetwork", gr.Dropdown, lambda: {"choices": ["None"] + [x for x in hypernetworks.keys()]}, refresh=reload_hypernetworks),
"sd_hypernetwork_strength": OptionInfo(1.0, "Hypernetwork strength", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.001}),
"inpainting_mask_weight": OptionInfo(1.0, "Inpainting conditioning mask strength", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
"img2img_color_correction": OptionInfo(False, "Apply color correction to img2img results to match original colors."),
"img2img_fix_steps": OptionInfo(False, "With img2img, do exactly the amount of steps the slider specifies (normally you'd do less with less denoising)."),
"enable_quantization": OptionInfo(False, "Enable quantization in K samplers for sharper and cleaner results. This may change existing seeds. Requires restart to apply."),
"enable_emphasis": OptionInfo(True, "Emphasis: use (text) to make model pay more attention to text and [text] to make it pay less attention"),
"use_old_emphasis_implementation": OptionInfo(False, "Use old emphasis implementation. Can be useful to reproduce old seeds."),
"enable_batch_seeds": OptionInfo(True, "Make K-diffusion samplers produce same images in a batch as when making a single image"),
"comma_padding_backtrack": OptionInfo(20, "Increase coherency by padding from the last comma within n tokens when using more than 75 tokens", gr.Slider, {"minimum": 0, "maximum": 74, "step": 1 }),
"filter_nsfw": OptionInfo(False, "Filter NSFW content"),
'CLIP_stop_at_last_layers': OptionInfo(1, "Stop At last layers of CLIP model", gr.Slider, {"minimum": 1, "maximum": 12, "step": 1}),
"random_artist_categories": OptionInfo([], "Allowed categories for random artists selection when using the Roll button", gr.CheckboxGroup, {"choices": artist_db.categories()}),
}))
options_templates.update(options_section(('interrogate', "Interrogate Options"), {
"interrogate_keep_models_in_memory": OptionInfo(False, "Interrogate: keep models in VRAM"),
"interrogate_use_builtin_artists": OptionInfo(True, "Interrogate: use artists from artists.csv"),
"interrogate_return_ranks": OptionInfo(False, "Interrogate: include ranks of model tags matches in results (Has no effect on caption-based interrogators)."),
"interrogate_clip_num_beams": OptionInfo(1, "Interrogate: num_beams for BLIP", gr.Slider, {"minimum": 1, "maximum": 16, "step": 1}),
"interrogate_clip_min_length": OptionInfo(24, "Interrogate: minimum description length (excluding artists, etc..)", gr.Slider, {"minimum": 1, "maximum": 128, "step": 1}),
"interrogate_clip_max_length": OptionInfo(48, "Interrogate: maximum description length", gr.Slider, {"minimum": 1, "maximum": 256, "step": 1}),
"interrogate_clip_dict_limit": OptionInfo(1500, "CLIP: maximum number of lines in text file (0 = No limit)"),
"interrogate_deepbooru_score_threshold": OptionInfo(0.5, "Interrogate: deepbooru score threshold", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}),
"deepbooru_sort_alpha": OptionInfo(True, "Interrogate: deepbooru sort alphabetically"),
"deepbooru_use_spaces": OptionInfo(False, "use spaces for tags in deepbooru"),
"deepbooru_escape": OptionInfo(True, "escape (\\) brackets in deepbooru (so they are used as literal brackets and not for emphasis)"),
}))
options_templates.update(options_section(('ui', "User interface"), {
"show_progressbar": OptionInfo(True, "Show progressbar"),
"show_progress_every_n_steps": OptionInfo(0, "Show image creation progress every N sampling steps. Set to 0 to disable. Set to -1 to show after completion of batch.", gr.Slider, {"minimum": -1, "maximum": 32, "step": 1}),
"show_progress_grid": OptionInfo(True, "Show previews of all images generated in a batch as a grid"),
"return_grid": OptionInfo(True, "Show grid in results for web"),
"do_not_show_images": OptionInfo(False, "Do not show any images in results for web"),
"add_model_hash_to_info": OptionInfo(True, "Add model hash to generation information"),
"add_model_name_to_info": OptionInfo(False, "Add model name to generation information"),
"disable_weights_auto_swap": OptionInfo(False, "When reading generation parameters from text into UI (from PNG info or pasted text), do not change the selected model/checkpoint."),
"send_seed": OptionInfo(True, "Send seed when sending prompt or image to other interface"),
"font": OptionInfo("", "Font for image grids that have text"),
"js_modal_lightbox": OptionInfo(True, "Enable full page image viewer"),
"js_modal_lightbox_initially_zoomed": OptionInfo(True, "Show images zoomed in by default in full page image viewer"),
"show_progress_in_title": OptionInfo(True, "Show generation progress in window title."),
'quicksettings': OptionInfo("sd_model_checkpoint", "Quicksettings list"),
'localization': OptionInfo("None", "Localization (requires restart)", gr.Dropdown, lambda: {"choices": ["None"] + list(localization.localizations.keys())}, refresh=lambda: localization.list_localizations(cmd_opts.localizations_dir)),
}))
options_templates.update(options_section(('sampler-params', "Sampler parameters"), {
"hide_samplers": OptionInfo([], "Hide samplers in user interface (requires restart)", gr.CheckboxGroup, lambda: {"choices": [x.name for x in sd_samplers.all_samplers]}),
"eta_ddim": OptionInfo(0.0, "eta (noise multiplier) for DDIM", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
"eta_ancestral": OptionInfo(1.0, "eta (noise multiplier) for ancestral samplers", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
"ddim_discretize": OptionInfo('uniform', "img2img DDIM discretize", gr.Radio, {"choices": ['uniform', 'quad']}),
's_churn': OptionInfo(0.0, "sigma churn", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
's_tmin': OptionInfo(0.0, "sigma tmin", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
's_noise': OptionInfo(1.0, "sigma noise", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
'eta_noise_seed_delta': OptionInfo(0, "Eta noise seed delta", gr.Number, {"precision": 0}),
}))
options_templates.update(options_section((None, "Hidden options"), {
"disabled_extensions": OptionInfo([], "Disable those extensions"),
}))
options_templates.update()
class Options:
data = None
data_labels = options_templates
typemap = {int: float}
def __init__(self):
self.data = {k: v.default for k, v in self.data_labels.items()}
def __setattr__(self, key, value):
if self.data is not None:
if key in self.data or key in self.data_labels:
assert not cmd_opts.freeze_settings, "changing settings is disabled"
info = opts.data_labels.get(key, None)
comp_args = info.component_args if info else None
if isinstance(comp_args, dict) and comp_args.get('visible', True) is False:
raise RuntimeError(f"not possible to set {key} because it is restricted")
if cmd_opts.hide_ui_dir_config and key in restricted_opts:
raise RuntimeError(f"not possible to set {key} because it is restricted")
self.data[key] = value
return
return super(Options, self).__setattr__(key, value)
def __getattr__(self, item):
if self.data is not None:
if item in self.data:
return self.data[item]
if item in self.data_labels:
return self.data_labels[item].default
return super(Options, self).__getattribute__(item)
def save(self, filename):
assert not cmd_opts.freeze_settings, "saving settings is disabled"
with open(filename, "w", encoding="utf8") as file:
json.dump(self.data, file, indent=4)
def same_type(self, x, y):
if x is None or y is None:
return True
type_x = self.typemap.get(type(x), type(x))
type_y = self.typemap.get(type(y), type(y))
return type_x == type_y
def load(self, filename):
with open(filename, "r", encoding="utf8") as file:
self.data = json.load(file)
bad_settings = 0
for k, v in self.data.items():
info = self.data_labels.get(k, None)
if info is not None and not self.same_type(info.default, v):
print(f"Warning: bad setting value: {k}: {v} ({type(v).__name__}; expected {type(info.default).__name__})", file=sys.stderr)
bad_settings += 1
if bad_settings > 0:
print(f"The program is likely to not work with bad settings.\nSettings file: {filename}\nEither fix the file, or delete it and restart.", file=sys.stderr)
def onchange(self, key, func, call=True):
item = self.data_labels.get(key)
item.onchange = func
if call:
func()
def dumpjson(self):
d = {k: self.data.get(k, self.data_labels.get(k).default) for k in self.data_labels.keys()}
return json.dumps(d)
def add_option(self, key, info):
self.data_labels[key] = info
def reorder(self):
"""reorder settings so that all items related to section always go together"""
section_ids = {}
settings_items = self.data_labels.items()
for k, item in settings_items:
if item.section not in section_ids:
section_ids[item.section] = len(section_ids)
self.data_labels = {k: v for k, v in sorted(settings_items, key=lambda x: section_ids[x[1].section])}
opts = Options()
if os.path.exists(config_filename):
opts.load(config_filename)
sd_upscalers = []
sd_model = None
clip_model = None
progress_print_out = sys.stdout
class TotalTQDM:
def __init__(self):
self._tqdm = None
def reset(self):
self._tqdm = tqdm.tqdm(
desc="Total progress",
total=state.job_count * state.sampling_steps,
position=1,
file=progress_print_out
)
def update(self):
if not opts.multiple_tqdm or cmd_opts.disable_console_progressbars:
return
if self._tqdm is None:
self.reset()
self._tqdm.update()
def updateTotal(self, new_total):
if not opts.multiple_tqdm or cmd_opts.disable_console_progressbars:
return
if self._tqdm is None:
self.reset()
self._tqdm.total=new_total
def clear(self):
if self._tqdm is not None:
self._tqdm.close()
self._tqdm = None
total_tqdm = TotalTQDM()
mem_mon = modules.memmon.MemUsageMonitor("MemMon", device, opts)
mem_mon.start()
def listfiles(dirname):
filenames = [os.path.join(dirname, x) for x in sorted(os.listdir(dirname)) if not x.startswith(".")]
return [file for file in filenames if os.path.isfile(file)]
<|code_end|>
webui.py
<|code_start|>import os
import threading
import time
import importlib
import signal
import threading
from fastapi import FastAPI
from fastapi.middleware.gzip import GZipMiddleware
from modules.paths import script_path
from modules import devices, sd_samplers, upscaler, extensions
import modules.codeformer_model as codeformer
import modules.extras
import modules.face_restoration
import modules.gfpgan_model as gfpgan
import modules.img2img
import modules.lowvram
import modules.paths
import modules.scripts
import modules.sd_hijack
import modules.sd_models
import modules.sd_vae
import modules.shared as shared
import modules.txt2img
import modules.script_callbacks
import modules.ui
from modules import devices
from modules import modelloader
from modules.paths import script_path
from modules.shared import cmd_opts
import modules.hypernetworks.hypernetwork
queue_lock = threading.Lock()
def wrap_queued_call(func):
def f(*args, **kwargs):
with queue_lock:
res = func(*args, **kwargs)
return res
return f
def wrap_gradio_gpu_call(func, extra_outputs=None):
def f(*args, **kwargs):
shared.state.begin()
with queue_lock:
res = func(*args, **kwargs)
shared.state.end()
return res
return modules.ui.wrap_gradio_call(f, extra_outputs=extra_outputs)
def initialize():
extensions.list_extensions()
if cmd_opts.ui_debug_mode:
shared.sd_upscalers = upscaler.UpscalerLanczos().scalers
modules.scripts.load_scripts()
return
modelloader.cleanup_models()
modules.sd_models.setup_model()
codeformer.setup_model(cmd_opts.codeformer_models_path)
gfpgan.setup_model(cmd_opts.gfpgan_models_path)
shared.face_restorers.append(modules.face_restoration.FaceRestoration())
modelloader.load_upscalers()
modules.scripts.load_scripts()
modules.sd_vae.refresh_vae_list()
modules.sd_models.load_model()
shared.opts.onchange("sd_model_checkpoint", wrap_queued_call(lambda: modules.sd_models.reload_model_weights()))
shared.opts.onchange("sd_vae", wrap_queued_call(lambda: modules.sd_vae.reload_vae_weights()), call=False)
shared.opts.onchange("sd_hypernetwork", wrap_queued_call(lambda: modules.hypernetworks.hypernetwork.load_hypernetwork(shared.opts.sd_hypernetwork)))
shared.opts.onchange("sd_hypernetwork_strength", modules.hypernetworks.hypernetwork.apply_strength)
# make the program just exit at ctrl+c without waiting for anything
def sigint_handler(sig, frame):
print(f'Interrupted with signal {sig} in {frame}')
os._exit(0)
signal.signal(signal.SIGINT, sigint_handler)
def create_api(app):
from modules.api.api import Api
api = Api(app, queue_lock)
return api
def wait_on_server(demo=None):
while 1:
time.sleep(0.5)
if shared.state.need_restart:
shared.state.need_restart = False
time.sleep(0.5)
demo.close()
time.sleep(0.5)
break
def api_only():
initialize()
app = FastAPI()
app.add_middleware(GZipMiddleware, minimum_size=1000)
api = create_api(app)
modules.script_callbacks.app_started_callback(None, app)
api.launch(server_name="0.0.0.0" if cmd_opts.listen else "127.0.0.1", port=cmd_opts.port if cmd_opts.port else 7861)
def webui():
launch_api = cmd_opts.api
initialize()
while 1:
demo = modules.ui.create_ui(wrap_gradio_gpu_call=wrap_gradio_gpu_call)
app, local_url, share_url = demo.launch(
share=cmd_opts.share,
server_name="0.0.0.0" if cmd_opts.listen else None,
server_port=cmd_opts.port,
debug=cmd_opts.gradio_debug,
auth=[tuple(cred.split(':')) for cred in cmd_opts.gradio_auth.strip('"').split(',')] if cmd_opts.gradio_auth else None,
inbrowser=cmd_opts.autolaunch,
prevent_thread_lock=True
)
# after initial launch, disable --autolaunch for subsequent restarts
cmd_opts.autolaunch = False
# gradio uses a very open CORS policy via app.user_middleware, which makes it possible for
# an attacker to trick the user into opening a malicious HTML page, which makes a request to the
        # running web ui and does whatever the attacker wants, including installing an extension and
        # running its code. We disable this here. Suggested by RyotaK.
app.user_middleware = [x for x in app.user_middleware if x.cls.__name__ != 'CORSMiddleware']
app.add_middleware(GZipMiddleware, minimum_size=1000)
if launch_api:
create_api(app)
modules.script_callbacks.app_started_callback(demo, app)
wait_on_server(demo)
sd_samplers.set_samplers()
print('Reloading extensions')
extensions.list_extensions()
print('Reloading custom scripts')
modules.scripts.reload_scripts()
print('Reloading modules: modules.ui')
importlib.reload(modules.ui)
print('Refreshing Model List')
modules.sd_models.list_models()
print('Restarting Gradio')
if __name__ == "__main__":
if cmd_opts.nowebui:
api_only()
else:
webui()
<|code_end|>
|
modules/shared.py
<|code_start|>import argparse
import datetime
import json
import os
import sys
from collections import OrderedDict
import time
import gradio as gr
import tqdm
import modules.artists
import modules.interrogate
import modules.memmon
import modules.sd_models
import modules.styles
import modules.devices as devices
from modules import sd_samplers, sd_models, localization, sd_vae
from modules.hypernetworks import hypernetwork
from modules.paths import models_path, script_path, sd_path
sd_model_file = os.path.join(script_path, 'model.ckpt')
default_sd_model_file = sd_model_file
parser = argparse.ArgumentParser()
parser.add_argument("--config", type=str, default=os.path.join(sd_path, "configs/stable-diffusion/v1-inference.yaml"), help="path to config which constructs model",)
parser.add_argument("--ckpt", type=str, default=sd_model_file, help="path to checkpoint of stable diffusion model; if specified, this checkpoint will be added to the list of checkpoints and loaded",)
parser.add_argument("--ckpt-dir", type=str, default=None, help="Path to directory with stable diffusion checkpoints")
parser.add_argument("--gfpgan-dir", type=str, help="GFPGAN directory", default=('./src/gfpgan' if os.path.exists('./src/gfpgan') else './GFPGAN'))
parser.add_argument("--gfpgan-model", type=str, help="GFPGAN model file name", default=None)
parser.add_argument("--no-half", action='store_true', help="do not switch the model to 16-bit floats")
parser.add_argument("--no-half-vae", action='store_true', help="do not switch the VAE model to 16-bit floats")
parser.add_argument("--no-progressbar-hiding", action='store_true', help="do not hide progressbar in gradio UI (we hide it because it slows down ML if you have hardware acceleration in browser)")
parser.add_argument("--max-batch-count", type=int, default=16, help="maximum batch count value for the UI")
parser.add_argument("--embeddings-dir", type=str, default=os.path.join(script_path, 'embeddings'), help="embeddings directory for textual inversion (default: embeddings)")
parser.add_argument("--hypernetwork-dir", type=str, default=os.path.join(models_path, 'hypernetworks'), help="hypernetwork directory")
parser.add_argument("--localizations-dir", type=str, default=os.path.join(script_path, 'localizations'), help="localizations directory")
parser.add_argument("--allow-code", action='store_true', help="allow custom script execution from webui")
parser.add_argument("--medvram", action='store_true', help="enable stable diffusion model optimizations for sacrificing a little speed for low VRM usage")
parser.add_argument("--lowvram", action='store_true', help="enable stable diffusion model optimizations for sacrificing a lot of speed for very low VRM usage")
parser.add_argument("--lowram", action='store_true', help="load stable diffusion checkpoint weights to VRAM instead of RAM")
parser.add_argument("--always-batch-cond-uncond", action='store_true', help="disables cond/uncond batching that is enabled to save memory with --medvram or --lowvram")
parser.add_argument("--unload-gfpgan", action='store_true', help="does not do anything.")
parser.add_argument("--precision", type=str, help="evaluate at this precision", choices=["full", "autocast"], default="autocast")
parser.add_argument("--share", action='store_true', help="use share=True for gradio and make the UI accessible through their site")
parser.add_argument("--ngrok", type=str, help="ngrok authtoken, alternative to gradio --share", default=None)
parser.add_argument("--ngrok-region", type=str, help="The region in which ngrok should start.", default="us")
parser.add_argument("--enable-insecure-extension-access", action='store_true', help="enable extensions tab regardless of other options")
parser.add_argument("--codeformer-models-path", type=str, help="Path to directory with codeformer model file(s).", default=os.path.join(models_path, 'Codeformer'))
parser.add_argument("--gfpgan-models-path", type=str, help="Path to directory with GFPGAN model file(s).", default=os.path.join(models_path, 'GFPGAN'))
parser.add_argument("--esrgan-models-path", type=str, help="Path to directory with ESRGAN model file(s).", default=os.path.join(models_path, 'ESRGAN'))
parser.add_argument("--bsrgan-models-path", type=str, help="Path to directory with BSRGAN model file(s).", default=os.path.join(models_path, 'BSRGAN'))
parser.add_argument("--realesrgan-models-path", type=str, help="Path to directory with RealESRGAN model file(s).", default=os.path.join(models_path, 'RealESRGAN'))
parser.add_argument("--scunet-models-path", type=str, help="Path to directory with ScuNET model file(s).", default=os.path.join(models_path, 'ScuNET'))
parser.add_argument("--swinir-models-path", type=str, help="Path to directory with SwinIR model file(s).", default=os.path.join(models_path, 'SwinIR'))
parser.add_argument("--ldsr-models-path", type=str, help="Path to directory with LDSR model file(s).", default=os.path.join(models_path, 'LDSR'))
parser.add_argument("--clip-models-path", type=str, help="Path to directory with CLIP model file(s).", default=None)
parser.add_argument("--xformers", action='store_true', help="enable xformers for cross attention layers")
parser.add_argument("--force-enable-xformers", action='store_true', help="enable xformers for cross attention layers regardless of whether the checking code thinks you can run it; do not make bug reports if this fails to work")
parser.add_argument("--deepdanbooru", action='store_true', help="enable deepdanbooru interrogator")
parser.add_argument("--opt-split-attention", action='store_true', help="force-enables Doggettx's cross-attention layer optimization. By default, it's on for torch cuda.")
parser.add_argument("--opt-split-attention-invokeai", action='store_true', help="force-enables InvokeAI's cross-attention layer optimization. By default, it's on when cuda is unavailable.")
parser.add_argument("--opt-split-attention-v1", action='store_true', help="enable older version of split attention optimization that does not consume all the VRAM it can find")
parser.add_argument("--disable-opt-split-attention", action='store_true', help="force-disables cross-attention layer optimization")
parser.add_argument("--use-cpu", nargs='+',choices=['all', 'sd', 'interrogate', 'gfpgan', 'swinir', 'esrgan', 'scunet', 'codeformer'], help="use CPU as torch device for specified modules", default=[], type=str.lower)
parser.add_argument("--listen", action='store_true', help="launch gradio with 0.0.0.0 as server name, allowing to respond to network requests")
parser.add_argument("--port", type=int, help="launch gradio with given server port, you need root/admin rights for ports < 1024, defaults to 7860 if available", default=None)
parser.add_argument("--show-negative-prompt", action='store_true', help="does not do anything", default=False)
parser.add_argument("--ui-config-file", type=str, help="filename to use for ui configuration", default=os.path.join(script_path, 'ui-config.json'))
parser.add_argument("--hide-ui-dir-config", action='store_true', help="hide directory configuration from webui", default=False)
parser.add_argument("--freeze-settings", action='store_true', help="disable editing settings", default=False)
parser.add_argument("--ui-settings-file", type=str, help="filename to use for ui settings", default=os.path.join(script_path, 'config.json'))
parser.add_argument("--gradio-debug", action='store_true', help="launch gradio with --debug option")
parser.add_argument("--gradio-auth", type=str, help='set gradio authentication like "username:password"; or comma-delimit multiple like "u1:p1,u2:p2,u3:p3"', default=None)
parser.add_argument("--gradio-img2img-tool", type=str, help='gradio image uploader tool: can be either editor for ctopping, or color-sketch for drawing', choices=["color-sketch", "editor"], default="editor")
parser.add_argument("--opt-channelslast", action='store_true', help="change memory type for stable diffusion to channels last")
parser.add_argument("--styles-file", type=str, help="filename to use for styles", default=os.path.join(script_path, 'styles.csv'))
parser.add_argument("--autolaunch", action='store_true', help="open the webui URL in the system's default browser upon launch", default=False)
parser.add_argument("--theme", type=str, help="launches the UI with light or dark theme", default=None)
parser.add_argument("--use-textbox-seed", action='store_true', help="use textbox for seeds in UI (no up/down, but possible to input long seeds)", default=False)
parser.add_argument("--disable-console-progressbars", action='store_true', help="do not output progressbars to console", default=False)
parser.add_argument("--enable-console-prompts", action='store_true', help="print prompts to console when generating with txt2img and img2img", default=False)
parser.add_argument('--vae-path', type=str, help='Path to Variational Autoencoders model', default=None)
parser.add_argument("--disable-safe-unpickle", action='store_true', help="disable checking pytorch models for malicious code", default=False)
parser.add_argument("--api", action='store_true', help="use api=True to launch the api with the webui")
parser.add_argument("--nowebui", action='store_true', help="use api=True to launch the api instead of the webui")
parser.add_argument("--ui-debug-mode", action='store_true', help="Don't load model to quickly launch UI")
parser.add_argument("--device-id", type=str, help="Select the default CUDA device to use (export CUDA_VISIBLE_DEVICES=0,1,etc might be needed before)", default=None)
parser.add_argument("--administrator", action='store_true', help="Administrator rights", default=False)
parser.add_argument("--tls-keyfile", type=str, help="Partially enables TLS, requires --tls-certfile to fully function", default=None)
parser.add_argument("--tls-certfile", type=str, help="Partially enables TLS, requires --tls-keyfile to fully function", default=None)
parser.add_argument("--server-name", type=str, help="Sets hostname of server", default=None)
cmd_opts = parser.parse_args()
restricted_opts = {
"samples_filename_pattern",
"directories_filename_pattern",
"outdir_samples",
"outdir_txt2img_samples",
"outdir_img2img_samples",
"outdir_extras_samples",
"outdir_grids",
"outdir_txt2img_grids",
"outdir_save",
}
cmd_opts.disable_extension_access = (cmd_opts.share or cmd_opts.listen) and not cmd_opts.enable_insecure_extension_access
devices.device, devices.device_interrogate, devices.device_gfpgan, devices.device_swinir, devices.device_esrgan, devices.device_scunet, devices.device_codeformer = \
(devices.cpu if any(y in cmd_opts.use_cpu for y in [x, 'all']) else devices.get_optimal_device() for x in ['sd', 'interrogate', 'gfpgan', 'swinir', 'esrgan', 'scunet', 'codeformer'])
device = devices.device
weight_load_location = None if cmd_opts.lowram else "cpu"
batch_cond_uncond = cmd_opts.always_batch_cond_uncond or not (cmd_opts.lowvram or cmd_opts.medvram)
parallel_processing_allowed = not cmd_opts.lowvram and not cmd_opts.medvram
xformers_available = False
config_filename = cmd_opts.ui_settings_file
os.makedirs(cmd_opts.hypernetwork_dir, exist_ok=True)
hypernetworks = hypernetwork.list_hypernetworks(cmd_opts.hypernetwork_dir)
loaded_hypernetwork = None
def reload_hypernetworks():
global hypernetworks
hypernetworks = hypernetwork.list_hypernetworks(cmd_opts.hypernetwork_dir)
hypernetwork.load_hypernetwork(opts.sd_hypernetwork)
class State:
skipped = False
interrupted = False
job = ""
job_no = 0
job_count = 0
job_timestamp = '0'
sampling_step = 0
sampling_steps = 0
current_latent = None
current_image = None
current_image_sampling_step = 0
textinfo = None
time_start = None
need_restart = False
def skip(self):
self.skipped = True
def interrupt(self):
self.interrupted = True
def nextjob(self):
if opts.show_progress_every_n_steps == -1:
self.do_set_current_image()
self.job_no += 1
self.sampling_step = 0
self.current_image_sampling_step = 0
def dict(self):
obj = {
"skipped": self.skipped,
"interrupted": self.skipped,
"job": self.job,
"job_count": self.job_count,
"job_no": self.job_no,
"sampling_step": self.sampling_step,
"sampling_steps": self.sampling_steps,
}
return obj
def begin(self):
self.sampling_step = 0
self.job_count = -1
self.job_no = 0
self.job_timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
self.current_latent = None
self.current_image = None
self.current_image_sampling_step = 0
self.skipped = False
self.interrupted = False
self.textinfo = None
self.time_start = time.time()
devices.torch_gc()
def end(self):
self.job = ""
self.job_count = 0
devices.torch_gc()
"""sets self.current_image from self.current_latent if enough sampling steps have been made after the last call to this"""
def set_current_image(self):
if self.sampling_step - self.current_image_sampling_step >= opts.show_progress_every_n_steps and opts.show_progress_every_n_steps > 0:
self.do_set_current_image()
def do_set_current_image(self):
if not parallel_processing_allowed:
return
if self.current_latent is None:
return
if opts.show_progress_grid:
self.current_image = sd_samplers.samples_to_image_grid(self.current_latent)
else:
self.current_image = sd_samplers.sample_to_image(self.current_latent)
self.current_image_sampling_step = self.sampling_step
state = State()
artist_db = modules.artists.ArtistsDatabase(os.path.join(script_path, 'artists.csv'))
styles_filename = cmd_opts.styles_file
prompt_styles = modules.styles.StyleDatabase(styles_filename)
interrogator = modules.interrogate.InterrogateModels("interrogate")
face_restorers = []
localization.list_localizations(cmd_opts.localizations_dir)
def realesrgan_models_names():
import modules.realesrgan_model
return [x.name for x in modules.realesrgan_model.get_realesrgan_models(None)]
class OptionInfo:
def __init__(self, default=None, label="", component=None, component_args=None, onchange=None, section=None, refresh=None):
self.default = default
self.label = label
self.component = component
self.component_args = component_args
self.onchange = onchange
self.section = section
self.refresh = refresh
def options_section(section_identifier, options_dict):
for k, v in options_dict.items():
v.section = section_identifier
return options_dict
hide_dirs = {"visible": not cmd_opts.hide_ui_dir_config}
options_templates = {}
options_templates.update(options_section(('saving-images', "Saving images/grids"), {
"samples_save": OptionInfo(True, "Always save all generated images"),
"samples_format": OptionInfo('png', 'File format for images'),
"samples_filename_pattern": OptionInfo("", "Images filename pattern", component_args=hide_dirs),
"save_images_add_number": OptionInfo(True, "Add number to filename when saving", component_args=hide_dirs),
"grid_save": OptionInfo(True, "Always save all generated image grids"),
"grid_format": OptionInfo('png', 'File format for grids'),
"grid_extended_filename": OptionInfo(False, "Add extended info (seed, prompt) to filename when saving grid"),
"grid_only_if_multiple": OptionInfo(True, "Do not save grids consisting of one picture"),
"grid_prevent_empty_spots": OptionInfo(False, "Prevent empty spots in grid (when set to autodetect)"),
"n_rows": OptionInfo(-1, "Grid row count; use -1 for autodetect and 0 for it to be same as batch size", gr.Slider, {"minimum": -1, "maximum": 16, "step": 1}),
"enable_pnginfo": OptionInfo(True, "Save text information about generation parameters as chunks to png files"),
"save_txt": OptionInfo(False, "Create a text file next to every image with generation parameters."),
"save_images_before_face_restoration": OptionInfo(False, "Save a copy of image before doing face restoration."),
"save_images_before_highres_fix": OptionInfo(False, "Save a copy of image before applying highres fix."),
"save_images_before_color_correction": OptionInfo(False, "Save a copy of image before applying color correction to img2img results"),
"jpeg_quality": OptionInfo(80, "Quality for saved jpeg images", gr.Slider, {"minimum": 1, "maximum": 100, "step": 1}),
"export_for_4chan": OptionInfo(True, "If PNG image is larger than 4MB or any dimension is larger than 4000, downscale and save copy as JPG"),
"use_original_name_batch": OptionInfo(False, "Use original name for output filename during batch process in extras tab"),
"save_selected_only": OptionInfo(True, "When using 'Save' button, only save a single selected image"),
"do_not_add_watermark": OptionInfo(False, "Do not add watermark to images"),
}))
options_templates.update(options_section(('saving-paths', "Paths for saving"), {
"outdir_samples": OptionInfo("", "Output directory for images; if empty, defaults to three directories below", component_args=hide_dirs),
"outdir_txt2img_samples": OptionInfo("outputs/txt2img-images", 'Output directory for txt2img images', component_args=hide_dirs),
"outdir_img2img_samples": OptionInfo("outputs/img2img-images", 'Output directory for img2img images', component_args=hide_dirs),
"outdir_extras_samples": OptionInfo("outputs/extras-images", 'Output directory for images from extras tab', component_args=hide_dirs),
"outdir_grids": OptionInfo("", "Output directory for grids; if empty, defaults to two directories below", component_args=hide_dirs),
"outdir_txt2img_grids": OptionInfo("outputs/txt2img-grids", 'Output directory for txt2img grids', component_args=hide_dirs),
"outdir_img2img_grids": OptionInfo("outputs/img2img-grids", 'Output directory for img2img grids', component_args=hide_dirs),
"outdir_save": OptionInfo("log/images", "Directory for saving images using the Save button", component_args=hide_dirs),
}))
options_templates.update(options_section(('saving-to-dirs', "Saving to a directory"), {
"save_to_dirs": OptionInfo(False, "Save images to a subdirectory"),
"grid_save_to_dirs": OptionInfo(False, "Save grids to a subdirectory"),
"use_save_to_dirs_for_ui": OptionInfo(False, "When using \"Save\" button, save images to a subdirectory"),
"directories_filename_pattern": OptionInfo("", "Directory name pattern", component_args=hide_dirs),
"directories_max_prompt_words": OptionInfo(8, "Max prompt words for [prompt_words] pattern", gr.Slider, {"minimum": 1, "maximum": 20, "step": 1, **hide_dirs}),
}))
options_templates.update(options_section(('upscaling', "Upscaling"), {
"ESRGAN_tile": OptionInfo(192, "Tile size for ESRGAN upscalers. 0 = no tiling.", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}),
"ESRGAN_tile_overlap": OptionInfo(8, "Tile overlap, in pixels for ESRGAN upscalers. Low values = visible seam.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}),
"realesrgan_enabled_models": OptionInfo(["R-ESRGAN x4+", "R-ESRGAN x4+ Anime6B"], "Select which Real-ESRGAN models to show in the web UI. (Requires restart)", gr.CheckboxGroup, lambda: {"choices": realesrgan_models_names()}),
"SWIN_tile": OptionInfo(192, "Tile size for all SwinIR.", gr.Slider, {"minimum": 16, "maximum": 512, "step": 16}),
"SWIN_tile_overlap": OptionInfo(8, "Tile overlap, in pixels for SwinIR. Low values = visible seam.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}),
"ldsr_steps": OptionInfo(100, "LDSR processing steps. Lower = faster", gr.Slider, {"minimum": 1, "maximum": 200, "step": 1}),
"upscaler_for_img2img": OptionInfo(None, "Upscaler for img2img", gr.Dropdown, lambda: {"choices": [x.name for x in sd_upscalers]}),
"use_scale_latent_for_hires_fix": OptionInfo(False, "Upscale latent space image when doing hires. fix"),
}))
options_templates.update(options_section(('face-restoration', "Face restoration"), {
"face_restoration_model": OptionInfo(None, "Face restoration model", gr.Radio, lambda: {"choices": [x.name() for x in face_restorers]}),
"code_former_weight": OptionInfo(0.5, "CodeFormer weight parameter; 0 = maximum effect; 1 = minimum effect", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}),
"face_restoration_unload": OptionInfo(False, "Move face restoration model from VRAM into RAM after processing"),
}))
options_templates.update(options_section(('system', "System"), {
"memmon_poll_rate": OptionInfo(8, "VRAM usage polls per second during generation. Set to 0 to disable.", gr.Slider, {"minimum": 0, "maximum": 40, "step": 1}),
"samples_log_stdout": OptionInfo(False, "Always print all generation info to standard output"),
"multiple_tqdm": OptionInfo(True, "Add a second progress bar to the console that shows progress for an entire job."),
}))
options_templates.update(options_section(('training', "Training"), {
"unload_models_when_training": OptionInfo(False, "Move VAE and CLIP to RAM when training if possible. Saves VRAM."),
"dataset_filename_word_regex": OptionInfo("", "Filename word regex"),
"dataset_filename_join_string": OptionInfo(" ", "Filename join string"),
"training_image_repeats_per_epoch": OptionInfo(1, "Number of repeats for a single input image per epoch; used only for displaying epoch number", gr.Number, {"precision": 0}),
"training_write_csv_every": OptionInfo(500, "Save an csv containing the loss to log directory every N steps, 0 to disable"),
"training_xattention_optimizations": OptionInfo(False, "Use cross attention optimizations while training"),
}))
options_templates.update(options_section(('sd', "Stable Diffusion"), {
"sd_model_checkpoint": OptionInfo(None, "Stable Diffusion checkpoint", gr.Dropdown, lambda: {"choices": modules.sd_models.checkpoint_tiles()}, refresh=sd_models.list_models),
"sd_checkpoint_cache": OptionInfo(0, "Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}),
"sd_vae": OptionInfo("auto", "SD VAE", gr.Dropdown, lambda: {"choices": list(sd_vae.vae_list)}, refresh=sd_vae.refresh_vae_list),
"sd_hypernetwork": OptionInfo("None", "Hypernetwork", gr.Dropdown, lambda: {"choices": ["None"] + [x for x in hypernetworks.keys()]}, refresh=reload_hypernetworks),
"sd_hypernetwork_strength": OptionInfo(1.0, "Hypernetwork strength", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.001}),
"inpainting_mask_weight": OptionInfo(1.0, "Inpainting conditioning mask strength", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
"img2img_color_correction": OptionInfo(False, "Apply color correction to img2img results to match original colors."),
"img2img_fix_steps": OptionInfo(False, "With img2img, do exactly the amount of steps the slider specifies (normally you'd do less with less denoising)."),
"enable_quantization": OptionInfo(False, "Enable quantization in K samplers for sharper and cleaner results. This may change existing seeds. Requires restart to apply."),
"enable_emphasis": OptionInfo(True, "Emphasis: use (text) to make model pay more attention to text and [text] to make it pay less attention"),
"use_old_emphasis_implementation": OptionInfo(False, "Use old emphasis implementation. Can be useful to reproduce old seeds."),
"enable_batch_seeds": OptionInfo(True, "Make K-diffusion samplers produce same images in a batch as when making a single image"),
"comma_padding_backtrack": OptionInfo(20, "Increase coherency by padding from the last comma within n tokens when using more than 75 tokens", gr.Slider, {"minimum": 0, "maximum": 74, "step": 1 }),
"filter_nsfw": OptionInfo(False, "Filter NSFW content"),
'CLIP_stop_at_last_layers': OptionInfo(1, "Stop At last layers of CLIP model", gr.Slider, {"minimum": 1, "maximum": 12, "step": 1}),
"random_artist_categories": OptionInfo([], "Allowed categories for random artists selection when using the Roll button", gr.CheckboxGroup, {"choices": artist_db.categories()}),
}))
options_templates.update(options_section(('interrogate', "Interrogate Options"), {
"interrogate_keep_models_in_memory": OptionInfo(False, "Interrogate: keep models in VRAM"),
"interrogate_use_builtin_artists": OptionInfo(True, "Interrogate: use artists from artists.csv"),
"interrogate_return_ranks": OptionInfo(False, "Interrogate: include ranks of model tags matches in results (Has no effect on caption-based interrogators)."),
"interrogate_clip_num_beams": OptionInfo(1, "Interrogate: num_beams for BLIP", gr.Slider, {"minimum": 1, "maximum": 16, "step": 1}),
"interrogate_clip_min_length": OptionInfo(24, "Interrogate: minimum description length (excluding artists, etc..)", gr.Slider, {"minimum": 1, "maximum": 128, "step": 1}),
"interrogate_clip_max_length": OptionInfo(48, "Interrogate: maximum description length", gr.Slider, {"minimum": 1, "maximum": 256, "step": 1}),
"interrogate_clip_dict_limit": OptionInfo(1500, "CLIP: maximum number of lines in text file (0 = No limit)"),
"interrogate_deepbooru_score_threshold": OptionInfo(0.5, "Interrogate: deepbooru score threshold", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}),
"deepbooru_sort_alpha": OptionInfo(True, "Interrogate: deepbooru sort alphabetically"),
"deepbooru_use_spaces": OptionInfo(False, "use spaces for tags in deepbooru"),
"deepbooru_escape": OptionInfo(True, "escape (\\) brackets in deepbooru (so they are used as literal brackets and not for emphasis)"),
}))
options_templates.update(options_section(('ui', "User interface"), {
"show_progressbar": OptionInfo(True, "Show progressbar"),
"show_progress_every_n_steps": OptionInfo(0, "Show image creation progress every N sampling steps. Set to 0 to disable. Set to -1 to show after completion of batch.", gr.Slider, {"minimum": -1, "maximum": 32, "step": 1}),
"show_progress_grid": OptionInfo(True, "Show previews of all images generated in a batch as a grid"),
"return_grid": OptionInfo(True, "Show grid in results for web"),
"do_not_show_images": OptionInfo(False, "Do not show any images in results for web"),
"add_model_hash_to_info": OptionInfo(True, "Add model hash to generation information"),
"add_model_name_to_info": OptionInfo(False, "Add model name to generation information"),
"disable_weights_auto_swap": OptionInfo(False, "When reading generation parameters from text into UI (from PNG info or pasted text), do not change the selected model/checkpoint."),
"send_seed": OptionInfo(True, "Send seed when sending prompt or image to other interface"),
"font": OptionInfo("", "Font for image grids that have text"),
"js_modal_lightbox": OptionInfo(True, "Enable full page image viewer"),
"js_modal_lightbox_initially_zoomed": OptionInfo(True, "Show images zoomed in by default in full page image viewer"),
"show_progress_in_title": OptionInfo(True, "Show generation progress in window title."),
'quicksettings': OptionInfo("sd_model_checkpoint", "Quicksettings list"),
'localization': OptionInfo("None", "Localization (requires restart)", gr.Dropdown, lambda: {"choices": ["None"] + list(localization.localizations.keys())}, refresh=lambda: localization.list_localizations(cmd_opts.localizations_dir)),
}))
options_templates.update(options_section(('sampler-params', "Sampler parameters"), {
"hide_samplers": OptionInfo([], "Hide samplers in user interface (requires restart)", gr.CheckboxGroup, lambda: {"choices": [x.name for x in sd_samplers.all_samplers]}),
"eta_ddim": OptionInfo(0.0, "eta (noise multiplier) for DDIM", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
"eta_ancestral": OptionInfo(1.0, "eta (noise multiplier) for ancestral samplers", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
"ddim_discretize": OptionInfo('uniform', "img2img DDIM discretize", gr.Radio, {"choices": ['uniform', 'quad']}),
's_churn': OptionInfo(0.0, "sigma churn", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
's_tmin': OptionInfo(0.0, "sigma tmin", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
's_noise': OptionInfo(1.0, "sigma noise", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
'eta_noise_seed_delta': OptionInfo(0, "Eta noise seed delta", gr.Number, {"precision": 0}),
}))
options_templates.update(options_section((None, "Hidden options"), {
"disabled_extensions": OptionInfo([], "Disable those extensions"),
}))
options_templates.update()
class Options:
data = None
data_labels = options_templates
typemap = {int: float}
def __init__(self):
self.data = {k: v.default for k, v in self.data_labels.items()}
def __setattr__(self, key, value):
if self.data is not None:
if key in self.data or key in self.data_labels:
assert not cmd_opts.freeze_settings, "changing settings is disabled"
info = opts.data_labels.get(key, None)
comp_args = info.component_args if info else None
if isinstance(comp_args, dict) and comp_args.get('visible', True) is False:
raise RuntimeError(f"not possible to set {key} because it is restricted")
if cmd_opts.hide_ui_dir_config and key in restricted_opts:
raise RuntimeError(f"not possible to set {key} because it is restricted")
self.data[key] = value
return
return super(Options, self).__setattr__(key, value)
def __getattr__(self, item):
if self.data is not None:
if item in self.data:
return self.data[item]
if item in self.data_labels:
return self.data_labels[item].default
return super(Options, self).__getattribute__(item)
def save(self, filename):
assert not cmd_opts.freeze_settings, "saving settings is disabled"
with open(filename, "w", encoding="utf8") as file:
json.dump(self.data, file, indent=4)
def same_type(self, x, y):
if x is None or y is None:
return True
type_x = self.typemap.get(type(x), type(x))
type_y = self.typemap.get(type(y), type(y))
return type_x == type_y
def load(self, filename):
with open(filename, "r", encoding="utf8") as file:
self.data = json.load(file)
bad_settings = 0
for k, v in self.data.items():
info = self.data_labels.get(k, None)
if info is not None and not self.same_type(info.default, v):
print(f"Warning: bad setting value: {k}: {v} ({type(v).__name__}; expected {type(info.default).__name__})", file=sys.stderr)
bad_settings += 1
if bad_settings > 0:
print(f"The program is likely to not work with bad settings.\nSettings file: {filename}\nEither fix the file, or delete it and restart.", file=sys.stderr)
def onchange(self, key, func, call=True):
item = self.data_labels.get(key)
item.onchange = func
if call:
func()
def dumpjson(self):
d = {k: self.data.get(k, self.data_labels.get(k).default) for k in self.data_labels.keys()}
return json.dumps(d)
def add_option(self, key, info):
self.data_labels[key] = info
def reorder(self):
"""reorder settings so that all items related to section always go together"""
section_ids = {}
settings_items = self.data_labels.items()
for k, item in settings_items:
if item.section not in section_ids:
section_ids[item.section] = len(section_ids)
self.data_labels = {k: v for k, v in sorted(settings_items, key=lambda x: section_ids[x[1].section])}
opts = Options()
if os.path.exists(config_filename):
opts.load(config_filename)
sd_upscalers = []
sd_model = None
clip_model = None
progress_print_out = sys.stdout
class TotalTQDM:
def __init__(self):
self._tqdm = None
def reset(self):
self._tqdm = tqdm.tqdm(
desc="Total progress",
total=state.job_count * state.sampling_steps,
position=1,
file=progress_print_out
)
def update(self):
if not opts.multiple_tqdm or cmd_opts.disable_console_progressbars:
return
if self._tqdm is None:
self.reset()
self._tqdm.update()
def updateTotal(self, new_total):
if not opts.multiple_tqdm or cmd_opts.disable_console_progressbars:
return
if self._tqdm is None:
self.reset()
self._tqdm.total=new_total
def clear(self):
if self._tqdm is not None:
self._tqdm.close()
self._tqdm = None
total_tqdm = TotalTQDM()
mem_mon = modules.memmon.MemUsageMonitor("MemMon", device, opts)
mem_mon.start()
def listfiles(dirname):
filenames = [os.path.join(dirname, x) for x in sorted(os.listdir(dirname)) if not x.startswith(".")]
return [file for file in filenames if os.path.isfile(file)]
<|code_end|>
webui.py
<|code_start|>import os
import threading
import time
import importlib
import signal
import threading
from fastapi import FastAPI
from fastapi.middleware.gzip import GZipMiddleware
from modules.paths import script_path
from modules import devices, sd_samplers, upscaler, extensions
import modules.codeformer_model as codeformer
import modules.extras
import modules.face_restoration
import modules.gfpgan_model as gfpgan
import modules.img2img
import modules.lowvram
import modules.paths
import modules.scripts
import modules.sd_hijack
import modules.sd_models
import modules.sd_vae
import modules.shared as shared
import modules.txt2img
import modules.script_callbacks
import modules.ui
from modules import devices
from modules import modelloader
from modules.paths import script_path
from modules.shared import cmd_opts
import modules.hypernetworks.hypernetwork
queue_lock = threading.Lock()
server_name = "0.0.0.0" if cmd_opts.listen else cmd_opts.server_name
def wrap_queued_call(func):
def f(*args, **kwargs):
with queue_lock:
res = func(*args, **kwargs)
return res
return f
def wrap_gradio_gpu_call(func, extra_outputs=None):
def f(*args, **kwargs):
shared.state.begin()
with queue_lock:
res = func(*args, **kwargs)
shared.state.end()
return res
return modules.ui.wrap_gradio_call(f, extra_outputs=extra_outputs)
def initialize():
extensions.list_extensions()
if cmd_opts.ui_debug_mode:
shared.sd_upscalers = upscaler.UpscalerLanczos().scalers
modules.scripts.load_scripts()
return
modelloader.cleanup_models()
modules.sd_models.setup_model()
codeformer.setup_model(cmd_opts.codeformer_models_path)
gfpgan.setup_model(cmd_opts.gfpgan_models_path)
shared.face_restorers.append(modules.face_restoration.FaceRestoration())
modelloader.load_upscalers()
modules.scripts.load_scripts()
modules.sd_vae.refresh_vae_list()
modules.sd_models.load_model()
shared.opts.onchange("sd_model_checkpoint", wrap_queued_call(lambda: modules.sd_models.reload_model_weights()))
shared.opts.onchange("sd_vae", wrap_queued_call(lambda: modules.sd_vae.reload_vae_weights()), call=False)
shared.opts.onchange("sd_hypernetwork", wrap_queued_call(lambda: modules.hypernetworks.hypernetwork.load_hypernetwork(shared.opts.sd_hypernetwork)))
shared.opts.onchange("sd_hypernetwork_strength", modules.hypernetworks.hypernetwork.apply_strength)
    if cmd_opts.tls_keyfile is not None and cmd_opts.tls_certfile is not None:
try:
if not os.path.exists(cmd_opts.tls_keyfile):
print("Invalid path to TLS keyfile given")
if not os.path.exists(cmd_opts.tls_certfile):
print(f"Invalid path to TLS certfile: '{cmd_opts.tls_certfile}'")
except TypeError:
cmd_opts.tls_keyfile = cmd_opts.tls_certfile = None
print("TLS setup invalid, running webui without TLS")
else:
print("Running with TLS")
# make the program just exit at ctrl+c without waiting for anything
def sigint_handler(sig, frame):
print(f'Interrupted with signal {sig} in {frame}')
os._exit(0)
signal.signal(signal.SIGINT, sigint_handler)
def create_api(app):
from modules.api.api import Api
api = Api(app, queue_lock)
return api
def wait_on_server(demo=None):
while 1:
time.sleep(0.5)
if shared.state.need_restart:
shared.state.need_restart = False
time.sleep(0.5)
demo.close()
time.sleep(0.5)
break
def api_only():
initialize()
app = FastAPI()
app.add_middleware(GZipMiddleware, minimum_size=1000)
api = create_api(app)
modules.script_callbacks.app_started_callback(None, app)
api.launch(server_name="0.0.0.0" if cmd_opts.listen else "127.0.0.1", port=cmd_opts.port if cmd_opts.port else 7861)
def webui():
launch_api = cmd_opts.api
initialize()
while 1:
demo = modules.ui.create_ui(wrap_gradio_gpu_call=wrap_gradio_gpu_call)
app, local_url, share_url = demo.launch(
share=cmd_opts.share,
server_name=server_name,
server_port=cmd_opts.port,
ssl_keyfile=cmd_opts.tls_keyfile,
ssl_certfile=cmd_opts.tls_certfile,
debug=cmd_opts.gradio_debug,
auth=[tuple(cred.split(':')) for cred in cmd_opts.gradio_auth.strip('"').split(',')] if cmd_opts.gradio_auth else None,
inbrowser=cmd_opts.autolaunch,
prevent_thread_lock=True
)
# after initial launch, disable --autolaunch for subsequent restarts
cmd_opts.autolaunch = False
# gradio uses a very open CORS policy via app.user_middleware, which makes it possible for
# an attacker to trick the user into opening a malicious HTML page, which makes a request to the
        # running web ui and do whatever the attacker wants, including installing an extension and
        # running its code. We disable this here. Suggested by RyotaK.
app.user_middleware = [x for x in app.user_middleware if x.cls.__name__ != 'CORSMiddleware']
app.add_middleware(GZipMiddleware, minimum_size=1000)
if launch_api:
create_api(app)
modules.script_callbacks.app_started_callback(demo, app)
wait_on_server(demo)
sd_samplers.set_samplers()
print('Reloading extensions')
extensions.list_extensions()
print('Reloading custom scripts')
modules.scripts.reload_scripts()
print('Reloading modules: modules.ui')
importlib.reload(modules.ui)
print('Refreshing Model List')
modules.sd_models.list_models()
print('Restarting Gradio')
if __name__ == "__main__":
if cmd_opts.nowebui:
api_only()
else:
webui()
<|code_end|>
|
[Bug]: X/Y plot ignores file format settings for grids
### Is there an existing issue for this?
- [x] I have searched the existing issues and checked the recent builds/commits
### What happened?
Grids generated by the X/Y plot are always saved in PNG format, regardless of the grid file format configured in settings.
### Steps to reproduce the problem
1. Set File format for grids to jpg
2. Generate grid using X/Y plot
### What should have happened?
Grid files should be saved in the format specified in the settings.
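A minimal sketch of the expected call, assuming `opts.grid_format` holds the grid file format configured in settings (as it does for the other grid-saving call sites), would pass that format through to `images.save_image` when the X/Y grid is saved:

```python
# Sketch only: save the X/Y grid with the configured grid format instead of the
# default. Assumes opts.grid_format and the `extension` keyword of
# images.save_image, as used by the other grid saves in this repo.
if opts.grid_save:
    images.save_image(
        processed.images[0], p.outpath_grids, "xy_grid",
        extension=opts.grid_format,  # respect "File format for grids"
        prompt=p.prompt, seed=processed.seed, grid=True, p=p,
    )
```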
### Commit where the problem happens
ca5a9e79dc28eeaa3a161427a82e34703bf15765
### What platforms do you use to access UI ?
Other/Cloud
### What browsers do you use to access the UI ?
Google Chrome
### Command Line Arguments
_No response_
### Additional information, context and logs
_No response_
|
scripts/prompt_matrix.py
<|code_start|>import math
from collections import namedtuple
from copy import copy
import random
import modules.scripts as scripts
import gradio as gr
from modules import images
from modules.processing import process_images, Processed
from modules.shared import opts, cmd_opts, state
import modules.sd_samplers
def draw_xy_grid(xs, ys, x_label, y_label, cell):
res = []
ver_texts = [[images.GridAnnotation(y_label(y))] for y in ys]
hor_texts = [[images.GridAnnotation(x_label(x))] for x in xs]
first_pocessed = None
state.job_count = len(xs) * len(ys)
for iy, y in enumerate(ys):
for ix, x in enumerate(xs):
state.job = f"{ix + iy * len(xs) + 1} out of {len(xs) * len(ys)}"
processed = cell(x, y)
if first_pocessed is None:
first_pocessed = processed
res.append(processed.images[0])
grid = images.image_grid(res, rows=len(ys))
grid = images.draw_grid_annotations(grid, res[0].width, res[0].height, hor_texts, ver_texts)
first_pocessed.images = [grid]
return first_pocessed
class Script(scripts.Script):
def title(self):
return "Prompt matrix"
def ui(self, is_img2img):
put_at_start = gr.Checkbox(label='Put variable parts at start of prompt', value=False)
return [put_at_start]
def run(self, p, put_at_start):
modules.processing.fix_seed(p)
original_prompt = p.prompt[0] if type(p.prompt) == list else p.prompt
all_prompts = []
prompt_matrix_parts = original_prompt.split("|")
combination_count = 2 ** (len(prompt_matrix_parts) - 1)
for combination_num in range(combination_count):
selected_prompts = [text.strip().strip(',') for n, text in enumerate(prompt_matrix_parts[1:]) if combination_num & (1 << n)]
if put_at_start:
selected_prompts = selected_prompts + [prompt_matrix_parts[0]]
else:
selected_prompts = [prompt_matrix_parts[0]] + selected_prompts
all_prompts.append(", ".join(selected_prompts))
p.n_iter = math.ceil(len(all_prompts) / p.batch_size)
p.do_not_save_grid = True
print(f"Prompt matrix will create {len(all_prompts)} images using a total of {p.n_iter} batches.")
p.prompt = all_prompts
p.seed = [p.seed for _ in all_prompts]
p.prompt_for_display = original_prompt
processed = process_images(p)
grid = images.image_grid(processed.images, p.batch_size, rows=1 << ((len(prompt_matrix_parts) - 1) // 2))
grid = images.draw_prompt_matrix(grid, p.width, p.height, prompt_matrix_parts)
processed.images.insert(0, grid)
if opts.grid_save:
images.save_image(processed.images[0], p.outpath_grids, "prompt_matrix", prompt=original_prompt, seed=processed.seed, grid=True, p=p)
return processed
<|code_end|>
scripts/xy_grid.py
<|code_start|>from collections import namedtuple
from copy import copy
from itertools import permutations, chain
import random
import csv
from io import StringIO
from PIL import Image
import numpy as np
import modules.scripts as scripts
import gradio as gr
from modules import images
from modules.hypernetworks import hypernetwork
from modules.processing import process_images, Processed, get_correct_sampler, StableDiffusionProcessingTxt2Img
from modules.shared import opts, cmd_opts, state
import modules.shared as shared
import modules.sd_samplers
import modules.sd_models
import re
def apply_field(field):
def fun(p, x, xs):
setattr(p, field, x)
return fun
def apply_prompt(p, x, xs):
if xs[0] not in p.prompt and xs[0] not in p.negative_prompt:
raise RuntimeError(f"Prompt S/R did not find {xs[0]} in prompt or negative prompt.")
p.prompt = p.prompt.replace(xs[0], x)
p.negative_prompt = p.negative_prompt.replace(xs[0], x)
def apply_order(p, x, xs):
token_order = []
    # Initially grab the tokens from the prompt, so they can be replaced in order of earliest seen
for token in x:
token_order.append((p.prompt.find(token), token))
token_order.sort(key=lambda t: t[0])
prompt_parts = []
# Split the prompt up, taking out the tokens
for _, token in token_order:
n = p.prompt.find(token)
prompt_parts.append(p.prompt[0:n])
p.prompt = p.prompt[n + len(token):]
# Rebuild the prompt with the tokens in the order we want
prompt_tmp = ""
for idx, part in enumerate(prompt_parts):
prompt_tmp += part
prompt_tmp += x[idx]
p.prompt = prompt_tmp + p.prompt
def build_samplers_dict(p):
samplers_dict = {}
for i, sampler in enumerate(get_correct_sampler(p)):
samplers_dict[sampler.name.lower()] = i
for alias in sampler.aliases:
samplers_dict[alias.lower()] = i
return samplers_dict
def apply_sampler(p, x, xs):
sampler_index = build_samplers_dict(p).get(x.lower(), None)
if sampler_index is None:
raise RuntimeError(f"Unknown sampler: {x}")
p.sampler_index = sampler_index
def confirm_samplers(p, xs):
samplers_dict = build_samplers_dict(p)
for x in xs:
if x.lower() not in samplers_dict.keys():
raise RuntimeError(f"Unknown sampler: {x}")
def apply_checkpoint(p, x, xs):
info = modules.sd_models.get_closet_checkpoint_match(x)
if info is None:
raise RuntimeError(f"Unknown checkpoint: {x}")
modules.sd_models.reload_model_weights(shared.sd_model, info)
p.sd_model = shared.sd_model
def confirm_checkpoints(p, xs):
for x in xs:
if modules.sd_models.get_closet_checkpoint_match(x) is None:
raise RuntimeError(f"Unknown checkpoint: {x}")
def apply_hypernetwork(p, x, xs):
if x.lower() in ["", "none"]:
name = None
else:
name = hypernetwork.find_closest_hypernetwork_name(x)
if not name:
raise RuntimeError(f"Unknown hypernetwork: {x}")
hypernetwork.load_hypernetwork(name)
def apply_hypernetwork_strength(p, x, xs):
hypernetwork.apply_strength(x)
def confirm_hypernetworks(p, xs):
for x in xs:
if x.lower() in ["", "none"]:
continue
if not hypernetwork.find_closest_hypernetwork_name(x):
raise RuntimeError(f"Unknown hypernetwork: {x}")
def apply_clip_skip(p, x, xs):
opts.data["CLIP_stop_at_last_layers"] = x
def format_value_add_label(p, opt, x):
if type(x) == float:
x = round(x, 8)
return f"{opt.label}: {x}"
def format_value(p, opt, x):
if type(x) == float:
x = round(x, 8)
return x
def format_value_join_list(p, opt, x):
return ", ".join(x)
def do_nothing(p, x, xs):
pass
def format_nothing(p, opt, x):
return ""
def str_permutations(x):
"""dummy function for specifying it in AxisOption's type when you want to get a list of permutations"""
return x
AxisOption = namedtuple("AxisOption", ["label", "type", "apply", "format_value", "confirm"])
AxisOptionImg2Img = namedtuple("AxisOptionImg2Img", ["label", "type", "apply", "format_value", "confirm"])
axis_options = [
AxisOption("Nothing", str, do_nothing, format_nothing, None),
AxisOption("Seed", int, apply_field("seed"), format_value_add_label, None),
AxisOption("Var. seed", int, apply_field("subseed"), format_value_add_label, None),
AxisOption("Var. strength", float, apply_field("subseed_strength"), format_value_add_label, None),
AxisOption("Steps", int, apply_field("steps"), format_value_add_label, None),
AxisOption("CFG Scale", float, apply_field("cfg_scale"), format_value_add_label, None),
AxisOption("Prompt S/R", str, apply_prompt, format_value, None),
AxisOption("Prompt order", str_permutations, apply_order, format_value_join_list, None),
AxisOption("Sampler", str, apply_sampler, format_value, confirm_samplers),
AxisOption("Checkpoint name", str, apply_checkpoint, format_value, confirm_checkpoints),
AxisOption("Hypernetwork", str, apply_hypernetwork, format_value, confirm_hypernetworks),
AxisOption("Hypernet str.", float, apply_hypernetwork_strength, format_value_add_label, None),
AxisOption("Sigma Churn", float, apply_field("s_churn"), format_value_add_label, None),
AxisOption("Sigma min", float, apply_field("s_tmin"), format_value_add_label, None),
AxisOption("Sigma max", float, apply_field("s_tmax"), format_value_add_label, None),
AxisOption("Sigma noise", float, apply_field("s_noise"), format_value_add_label, None),
AxisOption("Eta", float, apply_field("eta"), format_value_add_label, None),
AxisOption("Clip skip", int, apply_clip_skip, format_value_add_label, None),
AxisOption("Denoising", float, apply_field("denoising_strength"), format_value_add_label, None),
AxisOption("Cond. Image Mask Weight", float, apply_field("inpainting_mask_weight"), format_value_add_label, None),
]
def draw_xy_grid(p, xs, ys, x_labels, y_labels, cell, draw_legend, include_lone_images):
ver_texts = [[images.GridAnnotation(y)] for y in y_labels]
hor_texts = [[images.GridAnnotation(x)] for x in x_labels]
# Temporary list of all the images that are generated to be populated into the grid.
# Will be filled with empty images for any individual step that fails to process properly
image_cache = []
processed_result = None
cell_mode = "P"
cell_size = (1,1)
state.job_count = len(xs) * len(ys) * p.n_iter
for iy, y in enumerate(ys):
for ix, x in enumerate(xs):
state.job = f"{ix + iy * len(xs) + 1} out of {len(xs) * len(ys)}"
processed:Processed = cell(x, y)
try:
# this dereference will throw an exception if the image was not processed
# (this happens in cases such as if the user stops the process from the UI)
processed_image = processed.images[0]
if processed_result is None:
# Use our first valid processed result as a template container to hold our full results
processed_result = copy(processed)
cell_mode = processed_image.mode
cell_size = processed_image.size
processed_result.images = [Image.new(cell_mode, cell_size)]
image_cache.append(processed_image)
if include_lone_images:
processed_result.images.append(processed_image)
processed_result.all_prompts.append(processed.prompt)
processed_result.all_seeds.append(processed.seed)
processed_result.infotexts.append(processed.infotexts[0])
except:
image_cache.append(Image.new(cell_mode, cell_size))
if not processed_result:
print("Unexpected error: draw_xy_grid failed to return even a single processed image")
return Processed()
grid = images.image_grid(image_cache, rows=len(ys))
if draw_legend:
grid = images.draw_grid_annotations(grid, cell_size[0], cell_size[1], hor_texts, ver_texts)
processed_result.images[0] = grid
return processed_result
class SharedSettingsStackHelper(object):
def __enter__(self):
self.CLIP_stop_at_last_layers = opts.CLIP_stop_at_last_layers
self.hypernetwork = opts.sd_hypernetwork
self.model = shared.sd_model
def __exit__(self, exc_type, exc_value, tb):
modules.sd_models.reload_model_weights(self.model)
hypernetwork.load_hypernetwork(self.hypernetwork)
hypernetwork.apply_strength()
opts.data["CLIP_stop_at_last_layers"] = self.CLIP_stop_at_last_layers
re_range = re.compile(r"\s*([+-]?\s*\d+)\s*-\s*([+-]?\s*\d+)(?:\s*\(([+-]\d+)\s*\))?\s*")
re_range_float = re.compile(r"\s*([+-]?\s*\d+(?:.\d*)?)\s*-\s*([+-]?\s*\d+(?:.\d*)?)(?:\s*\(([+-]\d+(?:.\d*)?)\s*\))?\s*")
re_range_count = re.compile(r"\s*([+-]?\s*\d+)\s*-\s*([+-]?\s*\d+)(?:\s*\[(\d+)\s*\])?\s*")
re_range_count_float = re.compile(r"\s*([+-]?\s*\d+(?:.\d*)?)\s*-\s*([+-]?\s*\d+(?:.\d*)?)(?:\s*\[(\d+(?:.\d*)?)\s*\])?\s*")
class Script(scripts.Script):
def title(self):
return "X/Y plot"
def ui(self, is_img2img):
current_axis_options = [x for x in axis_options if type(x) == AxisOption or type(x) == AxisOptionImg2Img and is_img2img]
with gr.Row():
x_type = gr.Dropdown(label="X type", choices=[x.label for x in current_axis_options], value=current_axis_options[1].label, type="index", elem_id="x_type")
x_values = gr.Textbox(label="X values", lines=1)
with gr.Row():
y_type = gr.Dropdown(label="Y type", choices=[x.label for x in current_axis_options], value=current_axis_options[0].label, type="index", elem_id="y_type")
y_values = gr.Textbox(label="Y values", lines=1)
draw_legend = gr.Checkbox(label='Draw legend', value=True)
include_lone_images = gr.Checkbox(label='Include Separate Images', value=False)
no_fixed_seeds = gr.Checkbox(label='Keep -1 for seeds', value=False)
return [x_type, x_values, y_type, y_values, draw_legend, include_lone_images, no_fixed_seeds]
def run(self, p, x_type, x_values, y_type, y_values, draw_legend, include_lone_images, no_fixed_seeds):
if not no_fixed_seeds:
modules.processing.fix_seed(p)
if not opts.return_grid:
p.batch_size = 1
def process_axis(opt, vals):
if opt.label == 'Nothing':
return [0]
valslist = [x.strip() for x in chain.from_iterable(csv.reader(StringIO(vals)))]
if opt.type == int:
valslist_ext = []
for val in valslist:
m = re_range.fullmatch(val)
mc = re_range_count.fullmatch(val)
if m is not None:
start = int(m.group(1))
end = int(m.group(2))+1
step = int(m.group(3)) if m.group(3) is not None else 1
valslist_ext += list(range(start, end, step))
elif mc is not None:
start = int(mc.group(1))
end = int(mc.group(2))
num = int(mc.group(3)) if mc.group(3) is not None else 1
valslist_ext += [int(x) for x in np.linspace(start=start, stop=end, num=num).tolist()]
else:
valslist_ext.append(val)
valslist = valslist_ext
elif opt.type == float:
valslist_ext = []
for val in valslist:
m = re_range_float.fullmatch(val)
mc = re_range_count_float.fullmatch(val)
if m is not None:
start = float(m.group(1))
end = float(m.group(2))
step = float(m.group(3)) if m.group(3) is not None else 1
valslist_ext += np.arange(start, end + step, step).tolist()
elif mc is not None:
start = float(mc.group(1))
end = float(mc.group(2))
num = int(mc.group(3)) if mc.group(3) is not None else 1
valslist_ext += np.linspace(start=start, stop=end, num=num).tolist()
else:
valslist_ext.append(val)
valslist = valslist_ext
elif opt.type == str_permutations:
valslist = list(permutations(valslist))
valslist = [opt.type(x) for x in valslist]
# Confirm options are valid before starting
if opt.confirm:
opt.confirm(p, valslist)
return valslist
x_opt = axis_options[x_type]
xs = process_axis(x_opt, x_values)
y_opt = axis_options[y_type]
ys = process_axis(y_opt, y_values)
def fix_axis_seeds(axis_opt, axis_list):
if axis_opt.label in ['Seed','Var. seed']:
return [int(random.randrange(4294967294)) if val is None or val == '' or val == -1 else val for val in axis_list]
else:
return axis_list
if not no_fixed_seeds:
xs = fix_axis_seeds(x_opt, xs)
ys = fix_axis_seeds(y_opt, ys)
if x_opt.label == 'Steps':
total_steps = sum(xs) * len(ys)
elif y_opt.label == 'Steps':
total_steps = sum(ys) * len(xs)
else:
total_steps = p.steps * len(xs) * len(ys)
if isinstance(p, StableDiffusionProcessingTxt2Img) and p.enable_hr:
total_steps *= 2
print(f"X/Y plot will create {len(xs) * len(ys) * p.n_iter} images on a {len(xs)}x{len(ys)} grid. (Total steps to process: {total_steps * p.n_iter})")
shared.total_tqdm.updateTotal(total_steps * p.n_iter)
def cell(x, y):
pc = copy(p)
x_opt.apply(pc, x, xs)
y_opt.apply(pc, y, ys)
return process_images(pc)
with SharedSettingsStackHelper():
processed = draw_xy_grid(
p,
xs=xs,
ys=ys,
x_labels=[x_opt.format_value(p, x_opt, x) for x in xs],
y_labels=[y_opt.format_value(p, y_opt, y) for y in ys],
cell=cell,
draw_legend=draw_legend,
include_lone_images=include_lone_images
)
if opts.grid_save:
images.save_image(processed.images[0], p.outpath_grids, "xy_grid", prompt=p.prompt, seed=processed.seed, grid=True, p=p)
return processed
<|code_end|>
|
scripts/prompt_matrix.py
<|code_start|>import math
from collections import namedtuple
from copy import copy
import random
import modules.scripts as scripts
import gradio as gr
from modules import images
from modules.processing import process_images, Processed
from modules.shared import opts, cmd_opts, state
import modules.sd_samplers
def draw_xy_grid(xs, ys, x_label, y_label, cell):
res = []
ver_texts = [[images.GridAnnotation(y_label(y))] for y in ys]
hor_texts = [[images.GridAnnotation(x_label(x))] for x in xs]
first_pocessed = None
state.job_count = len(xs) * len(ys)
for iy, y in enumerate(ys):
for ix, x in enumerate(xs):
state.job = f"{ix + iy * len(xs) + 1} out of {len(xs) * len(ys)}"
processed = cell(x, y)
if first_pocessed is None:
first_pocessed = processed
res.append(processed.images[0])
grid = images.image_grid(res, rows=len(ys))
grid = images.draw_grid_annotations(grid, res[0].width, res[0].height, hor_texts, ver_texts)
first_pocessed.images = [grid]
return first_pocessed
class Script(scripts.Script):
def title(self):
return "Prompt matrix"
def ui(self, is_img2img):
put_at_start = gr.Checkbox(label='Put variable parts at start of prompt', value=False)
return [put_at_start]
def run(self, p, put_at_start):
modules.processing.fix_seed(p)
original_prompt = p.prompt[0] if type(p.prompt) == list else p.prompt
all_prompts = []
prompt_matrix_parts = original_prompt.split("|")
combination_count = 2 ** (len(prompt_matrix_parts) - 1)
for combination_num in range(combination_count):
selected_prompts = [text.strip().strip(',') for n, text in enumerate(prompt_matrix_parts[1:]) if combination_num & (1 << n)]
if put_at_start:
selected_prompts = selected_prompts + [prompt_matrix_parts[0]]
else:
selected_prompts = [prompt_matrix_parts[0]] + selected_prompts
all_prompts.append(", ".join(selected_prompts))
p.n_iter = math.ceil(len(all_prompts) / p.batch_size)
p.do_not_save_grid = True
print(f"Prompt matrix will create {len(all_prompts)} images using a total of {p.n_iter} batches.")
p.prompt = all_prompts
p.seed = [p.seed for _ in all_prompts]
p.prompt_for_display = original_prompt
processed = process_images(p)
grid = images.image_grid(processed.images, p.batch_size, rows=1 << ((len(prompt_matrix_parts) - 1) // 2))
grid = images.draw_prompt_matrix(grid, p.width, p.height, prompt_matrix_parts)
processed.images.insert(0, grid)
if opts.grid_save:
images.save_image(processed.images[0], p.outpath_grids, "prompt_matrix", extension=opts.grid_format, prompt=original_prompt, seed=processed.seed, grid=True, p=p)
return processed
<|code_end|>
scripts/xy_grid.py
<|code_start|>from collections import namedtuple
from copy import copy
from itertools import permutations, chain
import random
import csv
from io import StringIO
from PIL import Image
import numpy as np
import modules.scripts as scripts
import gradio as gr
from modules import images
from modules.hypernetworks import hypernetwork
from modules.processing import process_images, Processed, get_correct_sampler, StableDiffusionProcessingTxt2Img
from modules.shared import opts, cmd_opts, state
import modules.shared as shared
import modules.sd_samplers
import modules.sd_models
import re
def apply_field(field):
def fun(p, x, xs):
setattr(p, field, x)
return fun
def apply_prompt(p, x, xs):
if xs[0] not in p.prompt and xs[0] not in p.negative_prompt:
raise RuntimeError(f"Prompt S/R did not find {xs[0]} in prompt or negative prompt.")
p.prompt = p.prompt.replace(xs[0], x)
p.negative_prompt = p.negative_prompt.replace(xs[0], x)
def apply_order(p, x, xs):
token_order = []
    # Initially grab the tokens from the prompt, so they can be replaced in order of earliest seen
for token in x:
token_order.append((p.prompt.find(token), token))
token_order.sort(key=lambda t: t[0])
prompt_parts = []
# Split the prompt up, taking out the tokens
for _, token in token_order:
n = p.prompt.find(token)
prompt_parts.append(p.prompt[0:n])
p.prompt = p.prompt[n + len(token):]
# Rebuild the prompt with the tokens in the order we want
prompt_tmp = ""
for idx, part in enumerate(prompt_parts):
prompt_tmp += part
prompt_tmp += x[idx]
p.prompt = prompt_tmp + p.prompt
def build_samplers_dict(p):
samplers_dict = {}
for i, sampler in enumerate(get_correct_sampler(p)):
samplers_dict[sampler.name.lower()] = i
for alias in sampler.aliases:
samplers_dict[alias.lower()] = i
return samplers_dict
def apply_sampler(p, x, xs):
sampler_index = build_samplers_dict(p).get(x.lower(), None)
if sampler_index is None:
raise RuntimeError(f"Unknown sampler: {x}")
p.sampler_index = sampler_index
def confirm_samplers(p, xs):
samplers_dict = build_samplers_dict(p)
for x in xs:
if x.lower() not in samplers_dict.keys():
raise RuntimeError(f"Unknown sampler: {x}")
def apply_checkpoint(p, x, xs):
info = modules.sd_models.get_closet_checkpoint_match(x)
if info is None:
raise RuntimeError(f"Unknown checkpoint: {x}")
modules.sd_models.reload_model_weights(shared.sd_model, info)
p.sd_model = shared.sd_model
def confirm_checkpoints(p, xs):
for x in xs:
if modules.sd_models.get_closet_checkpoint_match(x) is None:
raise RuntimeError(f"Unknown checkpoint: {x}")
def apply_hypernetwork(p, x, xs):
if x.lower() in ["", "none"]:
name = None
else:
name = hypernetwork.find_closest_hypernetwork_name(x)
if not name:
raise RuntimeError(f"Unknown hypernetwork: {x}")
hypernetwork.load_hypernetwork(name)
def apply_hypernetwork_strength(p, x, xs):
hypernetwork.apply_strength(x)
def confirm_hypernetworks(p, xs):
for x in xs:
if x.lower() in ["", "none"]:
continue
if not hypernetwork.find_closest_hypernetwork_name(x):
raise RuntimeError(f"Unknown hypernetwork: {x}")
def apply_clip_skip(p, x, xs):
opts.data["CLIP_stop_at_last_layers"] = x
def format_value_add_label(p, opt, x):
if type(x) == float:
x = round(x, 8)
return f"{opt.label}: {x}"
def format_value(p, opt, x):
if type(x) == float:
x = round(x, 8)
return x
def format_value_join_list(p, opt, x):
return ", ".join(x)
def do_nothing(p, x, xs):
pass
def format_nothing(p, opt, x):
return ""
def str_permutations(x):
"""dummy function for specifying it in AxisOption's type when you want to get a list of permutations"""
return x
AxisOption = namedtuple("AxisOption", ["label", "type", "apply", "format_value", "confirm"])
AxisOptionImg2Img = namedtuple("AxisOptionImg2Img", ["label", "type", "apply", "format_value", "confirm"])
axis_options = [
AxisOption("Nothing", str, do_nothing, format_nothing, None),
AxisOption("Seed", int, apply_field("seed"), format_value_add_label, None),
AxisOption("Var. seed", int, apply_field("subseed"), format_value_add_label, None),
AxisOption("Var. strength", float, apply_field("subseed_strength"), format_value_add_label, None),
AxisOption("Steps", int, apply_field("steps"), format_value_add_label, None),
AxisOption("CFG Scale", float, apply_field("cfg_scale"), format_value_add_label, None),
AxisOption("Prompt S/R", str, apply_prompt, format_value, None),
AxisOption("Prompt order", str_permutations, apply_order, format_value_join_list, None),
AxisOption("Sampler", str, apply_sampler, format_value, confirm_samplers),
AxisOption("Checkpoint name", str, apply_checkpoint, format_value, confirm_checkpoints),
AxisOption("Hypernetwork", str, apply_hypernetwork, format_value, confirm_hypernetworks),
AxisOption("Hypernet str.", float, apply_hypernetwork_strength, format_value_add_label, None),
AxisOption("Sigma Churn", float, apply_field("s_churn"), format_value_add_label, None),
AxisOption("Sigma min", float, apply_field("s_tmin"), format_value_add_label, None),
AxisOption("Sigma max", float, apply_field("s_tmax"), format_value_add_label, None),
AxisOption("Sigma noise", float, apply_field("s_noise"), format_value_add_label, None),
AxisOption("Eta", float, apply_field("eta"), format_value_add_label, None),
AxisOption("Clip skip", int, apply_clip_skip, format_value_add_label, None),
AxisOption("Denoising", float, apply_field("denoising_strength"), format_value_add_label, None),
AxisOption("Cond. Image Mask Weight", float, apply_field("inpainting_mask_weight"), format_value_add_label, None),
]
def draw_xy_grid(p, xs, ys, x_labels, y_labels, cell, draw_legend, include_lone_images):
ver_texts = [[images.GridAnnotation(y)] for y in y_labels]
hor_texts = [[images.GridAnnotation(x)] for x in x_labels]
# Temporary list of all the images that are generated to be populated into the grid.
# Will be filled with empty images for any individual step that fails to process properly
image_cache = []
processed_result = None
cell_mode = "P"
cell_size = (1,1)
state.job_count = len(xs) * len(ys) * p.n_iter
for iy, y in enumerate(ys):
for ix, x in enumerate(xs):
state.job = f"{ix + iy * len(xs) + 1} out of {len(xs) * len(ys)}"
processed:Processed = cell(x, y)
try:
# this dereference will throw an exception if the image was not processed
# (this happens in cases such as if the user stops the process from the UI)
processed_image = processed.images[0]
if processed_result is None:
# Use our first valid processed result as a template container to hold our full results
processed_result = copy(processed)
cell_mode = processed_image.mode
cell_size = processed_image.size
processed_result.images = [Image.new(cell_mode, cell_size)]
image_cache.append(processed_image)
if include_lone_images:
processed_result.images.append(processed_image)
processed_result.all_prompts.append(processed.prompt)
processed_result.all_seeds.append(processed.seed)
processed_result.infotexts.append(processed.infotexts[0])
except:
image_cache.append(Image.new(cell_mode, cell_size))
if not processed_result:
print("Unexpected error: draw_xy_grid failed to return even a single processed image")
return Processed()
grid = images.image_grid(image_cache, rows=len(ys))
if draw_legend:
grid = images.draw_grid_annotations(grid, cell_size[0], cell_size[1], hor_texts, ver_texts)
processed_result.images[0] = grid
return processed_result
class SharedSettingsStackHelper(object):
def __enter__(self):
self.CLIP_stop_at_last_layers = opts.CLIP_stop_at_last_layers
self.hypernetwork = opts.sd_hypernetwork
self.model = shared.sd_model
def __exit__(self, exc_type, exc_value, tb):
modules.sd_models.reload_model_weights(self.model)
hypernetwork.load_hypernetwork(self.hypernetwork)
hypernetwork.apply_strength()
opts.data["CLIP_stop_at_last_layers"] = self.CLIP_stop_at_last_layers
re_range = re.compile(r"\s*([+-]?\s*\d+)\s*-\s*([+-]?\s*\d+)(?:\s*\(([+-]\d+)\s*\))?\s*")
re_range_float = re.compile(r"\s*([+-]?\s*\d+(?:.\d*)?)\s*-\s*([+-]?\s*\d+(?:.\d*)?)(?:\s*\(([+-]\d+(?:.\d*)?)\s*\))?\s*")
re_range_count = re.compile(r"\s*([+-]?\s*\d+)\s*-\s*([+-]?\s*\d+)(?:\s*\[(\d+)\s*\])?\s*")
re_range_count_float = re.compile(r"\s*([+-]?\s*\d+(?:.\d*)?)\s*-\s*([+-]?\s*\d+(?:.\d*)?)(?:\s*\[(\d+(?:.\d*)?)\s*\])?\s*")
class Script(scripts.Script):
def title(self):
return "X/Y plot"
def ui(self, is_img2img):
current_axis_options = [x for x in axis_options if type(x) == AxisOption or type(x) == AxisOptionImg2Img and is_img2img]
with gr.Row():
x_type = gr.Dropdown(label="X type", choices=[x.label for x in current_axis_options], value=current_axis_options[1].label, type="index", elem_id="x_type")
x_values = gr.Textbox(label="X values", lines=1)
with gr.Row():
y_type = gr.Dropdown(label="Y type", choices=[x.label for x in current_axis_options], value=current_axis_options[0].label, type="index", elem_id="y_type")
y_values = gr.Textbox(label="Y values", lines=1)
draw_legend = gr.Checkbox(label='Draw legend', value=True)
include_lone_images = gr.Checkbox(label='Include Separate Images', value=False)
no_fixed_seeds = gr.Checkbox(label='Keep -1 for seeds', value=False)
return [x_type, x_values, y_type, y_values, draw_legend, include_lone_images, no_fixed_seeds]
def run(self, p, x_type, x_values, y_type, y_values, draw_legend, include_lone_images, no_fixed_seeds):
if not no_fixed_seeds:
modules.processing.fix_seed(p)
if not opts.return_grid:
p.batch_size = 1
def process_axis(opt, vals):
if opt.label == 'Nothing':
return [0]
valslist = [x.strip() for x in chain.from_iterable(csv.reader(StringIO(vals)))]
if opt.type == int:
valslist_ext = []
for val in valslist:
m = re_range.fullmatch(val)
mc = re_range_count.fullmatch(val)
if m is not None:
start = int(m.group(1))
end = int(m.group(2))+1
step = int(m.group(3)) if m.group(3) is not None else 1
valslist_ext += list(range(start, end, step))
elif mc is not None:
start = int(mc.group(1))
end = int(mc.group(2))
num = int(mc.group(3)) if mc.group(3) is not None else 1
valslist_ext += [int(x) for x in np.linspace(start=start, stop=end, num=num).tolist()]
else:
valslist_ext.append(val)
valslist = valslist_ext
elif opt.type == float:
valslist_ext = []
for val in valslist:
m = re_range_float.fullmatch(val)
mc = re_range_count_float.fullmatch(val)
if m is not None:
start = float(m.group(1))
end = float(m.group(2))
step = float(m.group(3)) if m.group(3) is not None else 1
valslist_ext += np.arange(start, end + step, step).tolist()
elif mc is not None:
start = float(mc.group(1))
end = float(mc.group(2))
num = int(mc.group(3)) if mc.group(3) is not None else 1
valslist_ext += np.linspace(start=start, stop=end, num=num).tolist()
else:
valslist_ext.append(val)
valslist = valslist_ext
elif opt.type == str_permutations:
valslist = list(permutations(valslist))
valslist = [opt.type(x) for x in valslist]
# Confirm options are valid before starting
if opt.confirm:
opt.confirm(p, valslist)
return valslist
x_opt = axis_options[x_type]
xs = process_axis(x_opt, x_values)
y_opt = axis_options[y_type]
ys = process_axis(y_opt, y_values)
def fix_axis_seeds(axis_opt, axis_list):
if axis_opt.label in ['Seed','Var. seed']:
return [int(random.randrange(4294967294)) if val is None or val == '' or val == -1 else val for val in axis_list]
else:
return axis_list
if not no_fixed_seeds:
xs = fix_axis_seeds(x_opt, xs)
ys = fix_axis_seeds(y_opt, ys)
if x_opt.label == 'Steps':
total_steps = sum(xs) * len(ys)
elif y_opt.label == 'Steps':
total_steps = sum(ys) * len(xs)
else:
total_steps = p.steps * len(xs) * len(ys)
if isinstance(p, StableDiffusionProcessingTxt2Img) and p.enable_hr:
total_steps *= 2
print(f"X/Y plot will create {len(xs) * len(ys) * p.n_iter} images on a {len(xs)}x{len(ys)} grid. (Total steps to process: {total_steps * p.n_iter})")
shared.total_tqdm.updateTotal(total_steps * p.n_iter)
def cell(x, y):
pc = copy(p)
x_opt.apply(pc, x, xs)
y_opt.apply(pc, y, ys)
return process_images(pc)
with SharedSettingsStackHelper():
processed = draw_xy_grid(
p,
xs=xs,
ys=ys,
x_labels=[x_opt.format_value(p, x_opt, x) for x in xs],
y_labels=[y_opt.format_value(p, y_opt, y) for y in ys],
cell=cell,
draw_legend=draw_legend,
include_lone_images=include_lone_images
)
if opts.grid_save:
images.save_image(processed.images[0], p.outpath_grids, "xy_grid", extension=opts.grid_format, prompt=p.prompt, seed=processed.seed, grid=True, p=p)
return processed
<|code_end|>
|
Problems with LDSR upscaling
When using the new LDSR upscaling feature, be it via SD upscaling in img2img or directly in "Extras", black rectangles appear in the outputs on the right and at the bottom when upscaling a 230x219 photo.

original

upscaled
no issues when running it on a 512x704 image

original

upscaled
|
modules/ldsr_model_arch.py
<|code_start|>import gc
import time
import warnings
import numpy as np
import torch
import torchvision
from PIL import Image
from einops import rearrange, repeat
from omegaconf import OmegaConf
from ldm.models.diffusion.ddim import DDIMSampler
from ldm.util import instantiate_from_config, ismap
warnings.filterwarnings("ignore", category=UserWarning)
# Create LDSR Class
class LDSR:
def load_model_from_config(self, half_attention):
print(f"Loading model from {self.modelPath}")
pl_sd = torch.load(self.modelPath, map_location="cpu")
sd = pl_sd["state_dict"]
config = OmegaConf.load(self.yamlPath)
model = instantiate_from_config(config.model)
model.load_state_dict(sd, strict=False)
model.cuda()
if half_attention:
model = model.half()
model.eval()
return {"model": model}
def __init__(self, model_path, yaml_path):
self.modelPath = model_path
self.yamlPath = yaml_path
@staticmethod
def run(model, selected_path, custom_steps, eta):
example = get_cond(selected_path)
n_runs = 1
guider = None
ckwargs = None
ddim_use_x0_pred = False
temperature = 1.
eta = eta
custom_shape = None
height, width = example["image"].shape[1:3]
split_input = height >= 128 and width >= 128
if split_input:
ks = 128
stride = 64
vqf = 4 #
model.split_input_params = {"ks": (ks, ks), "stride": (stride, stride),
"vqf": vqf,
"patch_distributed_vq": True,
"tie_braker": False,
"clip_max_weight": 0.5,
"clip_min_weight": 0.01,
"clip_max_tie_weight": 0.5,
"clip_min_tie_weight": 0.01}
else:
if hasattr(model, "split_input_params"):
delattr(model, "split_input_params")
x_t = None
logs = None
for n in range(n_runs):
if custom_shape is not None:
x_t = torch.randn(1, custom_shape[1], custom_shape[2], custom_shape[3]).to(model.device)
x_t = repeat(x_t, '1 c h w -> b c h w', b=custom_shape[0])
logs = make_convolutional_sample(example, model,
custom_steps=custom_steps,
eta=eta, quantize_x0=False,
custom_shape=custom_shape,
temperature=temperature, noise_dropout=0.,
corrector=guider, corrector_kwargs=ckwargs, x_T=x_t,
ddim_use_x0_pred=ddim_use_x0_pred
)
return logs
def super_resolution(self, image, steps=100, target_scale=2, half_attention=False):
model = self.load_model_from_config(half_attention)
# Run settings
diffusion_steps = int(steps)
eta = 1.0
down_sample_method = 'Lanczos'
gc.collect()
torch.cuda.empty_cache()
im_og = image
width_og, height_og = im_og.size
# If we can adjust the max upscale size, then the 4 below should be our variable
down_sample_rate = target_scale / 4
wd = width_og * down_sample_rate
hd = height_og * down_sample_rate
width_downsampled_pre = int(wd)
height_downsampled_pre = int(hd)
if down_sample_rate != 1:
print(
f'Downsampling from [{width_og}, {height_og}] to [{width_downsampled_pre}, {height_downsampled_pre}]')
im_og = im_og.resize((width_downsampled_pre, height_downsampled_pre), Image.LANCZOS)
else:
print(f"Down sample rate is 1 from {target_scale} / 4 (Not downsampling)")
logs = self.run(model["model"], im_og, diffusion_steps, eta)
sample = logs["sample"]
sample = sample.detach().cpu()
sample = torch.clamp(sample, -1., 1.)
sample = (sample + 1.) / 2. * 255
sample = sample.numpy().astype(np.uint8)
sample = np.transpose(sample, (0, 2, 3, 1))
a = Image.fromarray(sample[0])
del model
gc.collect()
torch.cuda.empty_cache()
return a
def get_cond(selected_path):
example = dict()
up_f = 4
c = selected_path.convert('RGB')
c = torch.unsqueeze(torchvision.transforms.ToTensor()(c), 0)
c_up = torchvision.transforms.functional.resize(c, size=[up_f * c.shape[2], up_f * c.shape[3]],
antialias=True)
c_up = rearrange(c_up, '1 c h w -> 1 h w c')
c = rearrange(c, '1 c h w -> 1 h w c')
c = 2. * c - 1.
c = c.to(torch.device("cuda"))
example["LR_image"] = c
example["image"] = c_up
return example
@torch.no_grad()
def convsample_ddim(model, cond, steps, shape, eta=1.0, callback=None, normals_sequence=None,
mask=None, x0=None, quantize_x0=False, temperature=1., score_corrector=None,
corrector_kwargs=None, x_t=None
):
ddim = DDIMSampler(model)
bs = shape[0]
shape = shape[1:]
print(f"Sampling with eta = {eta}; steps: {steps}")
samples, intermediates = ddim.sample(steps, batch_size=bs, shape=shape, conditioning=cond, callback=callback,
normals_sequence=normals_sequence, quantize_x0=quantize_x0, eta=eta,
mask=mask, x0=x0, temperature=temperature, verbose=False,
score_corrector=score_corrector,
corrector_kwargs=corrector_kwargs, x_t=x_t)
return samples, intermediates
@torch.no_grad()
def make_convolutional_sample(batch, model, custom_steps=None, eta=1.0, quantize_x0=False, custom_shape=None, temperature=1., noise_dropout=0., corrector=None,
corrector_kwargs=None, x_T=None, ddim_use_x0_pred=False):
log = dict()
z, c, x, xrec, xc = model.get_input(batch, model.first_stage_key,
return_first_stage_outputs=True,
force_c_encode=not (hasattr(model, 'split_input_params')
and model.cond_stage_key == 'coordinates_bbox'),
return_original_cond=True)
if custom_shape is not None:
z = torch.randn(custom_shape)
print(f"Generating {custom_shape[0]} samples of shape {custom_shape[1:]}")
z0 = None
log["input"] = x
log["reconstruction"] = xrec
if ismap(xc):
log["original_conditioning"] = model.to_rgb(xc)
if hasattr(model, 'cond_stage_key'):
log[model.cond_stage_key] = model.to_rgb(xc)
else:
log["original_conditioning"] = xc if xc is not None else torch.zeros_like(x)
if model.cond_stage_model:
log[model.cond_stage_key] = xc if xc is not None else torch.zeros_like(x)
if model.cond_stage_key == 'class_label':
log[model.cond_stage_key] = xc[model.cond_stage_key]
with model.ema_scope("Plotting"):
t0 = time.time()
sample, intermediates = convsample_ddim(model, c, steps=custom_steps, shape=z.shape,
eta=eta,
quantize_x0=quantize_x0, mask=None, x0=z0,
temperature=temperature, score_corrector=corrector, corrector_kwargs=corrector_kwargs,
x_t=x_T)
t1 = time.time()
if ddim_use_x0_pred:
sample = intermediates['pred_x0'][-1]
x_sample = model.decode_first_stage(sample)
try:
x_sample_noquant = model.decode_first_stage(sample, force_not_quantize=True)
log["sample_noquant"] = x_sample_noquant
log["sample_diff"] = torch.abs(x_sample_noquant - x_sample)
except:
pass
log["sample"] = x_sample
log["time"] = t1 - t0
return log
<|code_end|>
|
modules/ldsr_model_arch.py
<|code_start|>import gc
import time
import warnings
import numpy as np
import torch
import torchvision
from PIL import Image
from einops import rearrange, repeat
from omegaconf import OmegaConf
from ldm.models.diffusion.ddim import DDIMSampler
from ldm.util import instantiate_from_config, ismap
warnings.filterwarnings("ignore", category=UserWarning)
# Create LDSR Class
class LDSR:
def load_model_from_config(self, half_attention):
print(f"Loading model from {self.modelPath}")
pl_sd = torch.load(self.modelPath, map_location="cpu")
sd = pl_sd["state_dict"]
config = OmegaConf.load(self.yamlPath)
model = instantiate_from_config(config.model)
model.load_state_dict(sd, strict=False)
model.cuda()
if half_attention:
model = model.half()
model.eval()
return {"model": model}
def __init__(self, model_path, yaml_path):
self.modelPath = model_path
self.yamlPath = yaml_path
@staticmethod
def run(model, selected_path, custom_steps, eta):
example = get_cond(selected_path)
n_runs = 1
guider = None
ckwargs = None
ddim_use_x0_pred = False
temperature = 1.
eta = eta
custom_shape = None
height, width = example["image"].shape[1:3]
split_input = height >= 128 and width >= 128
if split_input:
ks = 128
stride = 64
vqf = 4 #
model.split_input_params = {"ks": (ks, ks), "stride": (stride, stride),
"vqf": vqf,
"patch_distributed_vq": True,
"tie_braker": False,
"clip_max_weight": 0.5,
"clip_min_weight": 0.01,
"clip_max_tie_weight": 0.5,
"clip_min_tie_weight": 0.01}
else:
if hasattr(model, "split_input_params"):
delattr(model, "split_input_params")
x_t = None
logs = None
for n in range(n_runs):
if custom_shape is not None:
x_t = torch.randn(1, custom_shape[1], custom_shape[2], custom_shape[3]).to(model.device)
x_t = repeat(x_t, '1 c h w -> b c h w', b=custom_shape[0])
logs = make_convolutional_sample(example, model,
custom_steps=custom_steps,
eta=eta, quantize_x0=False,
custom_shape=custom_shape,
temperature=temperature, noise_dropout=0.,
corrector=guider, corrector_kwargs=ckwargs, x_T=x_t,
ddim_use_x0_pred=ddim_use_x0_pred
)
return logs
def super_resolution(self, image, steps=100, target_scale=2, half_attention=False):
model = self.load_model_from_config(half_attention)
# Run settings
diffusion_steps = int(steps)
eta = 1.0
down_sample_method = 'Lanczos'
gc.collect()
torch.cuda.empty_cache()
im_og = image
width_og, height_og = im_og.size
# If we can adjust the max upscale size, then the 4 below should be our variable
down_sample_rate = target_scale / 4
wd = width_og * down_sample_rate
hd = height_og * down_sample_rate
width_downsampled_pre = int(np.ceil(wd))
height_downsampled_pre = int(np.ceil(hd))
if down_sample_rate != 1:
print(
f'Downsampling from [{width_og}, {height_og}] to [{width_downsampled_pre}, {height_downsampled_pre}]')
im_og = im_og.resize((width_downsampled_pre, height_downsampled_pre), Image.LANCZOS)
else:
print(f"Down sample rate is 1 from {target_scale} / 4 (Not downsampling)")
# pad width and height to multiples of 64, pads with the edge values of image to avoid artifacts
pad_w, pad_h = np.max(((2, 2), np.ceil(np.array(im_og.size) / 64).astype(int)), axis=0) * 64 - im_og.size
im_padded = Image.fromarray(np.pad(np.array(im_og), ((0, pad_h), (0, pad_w), (0, 0)), mode='edge'))
logs = self.run(model["model"], im_padded, diffusion_steps, eta)
sample = logs["sample"]
sample = sample.detach().cpu()
sample = torch.clamp(sample, -1., 1.)
sample = (sample + 1.) / 2. * 255
sample = sample.numpy().astype(np.uint8)
sample = np.transpose(sample, (0, 2, 3, 1))
a = Image.fromarray(sample[0])
# remove padding
a = a.crop((0, 0) + tuple(np.array(im_og.size) * 4))
del model
gc.collect()
torch.cuda.empty_cache()
return a
def get_cond(selected_path):
example = dict()
up_f = 4
c = selected_path.convert('RGB')
c = torch.unsqueeze(torchvision.transforms.ToTensor()(c), 0)
c_up = torchvision.transforms.functional.resize(c, size=[up_f * c.shape[2], up_f * c.shape[3]],
antialias=True)
c_up = rearrange(c_up, '1 c h w -> 1 h w c')
c = rearrange(c, '1 c h w -> 1 h w c')
c = 2. * c - 1.
c = c.to(torch.device("cuda"))
example["LR_image"] = c
example["image"] = c_up
return example
@torch.no_grad()
def convsample_ddim(model, cond, steps, shape, eta=1.0, callback=None, normals_sequence=None,
mask=None, x0=None, quantize_x0=False, temperature=1., score_corrector=None,
corrector_kwargs=None, x_t=None
):
ddim = DDIMSampler(model)
bs = shape[0]
shape = shape[1:]
print(f"Sampling with eta = {eta}; steps: {steps}")
samples, intermediates = ddim.sample(steps, batch_size=bs, shape=shape, conditioning=cond, callback=callback,
normals_sequence=normals_sequence, quantize_x0=quantize_x0, eta=eta,
mask=mask, x0=x0, temperature=temperature, verbose=False,
score_corrector=score_corrector,
corrector_kwargs=corrector_kwargs, x_t=x_t)
return samples, intermediates
@torch.no_grad()
def make_convolutional_sample(batch, model, custom_steps=None, eta=1.0, quantize_x0=False, custom_shape=None, temperature=1., noise_dropout=0., corrector=None,
corrector_kwargs=None, x_T=None, ddim_use_x0_pred=False):
log = dict()
z, c, x, xrec, xc = model.get_input(batch, model.first_stage_key,
return_first_stage_outputs=True,
force_c_encode=not (hasattr(model, 'split_input_params')
and model.cond_stage_key == 'coordinates_bbox'),
return_original_cond=True)
if custom_shape is not None:
z = torch.randn(custom_shape)
print(f"Generating {custom_shape[0]} samples of shape {custom_shape[1:]}")
z0 = None
log["input"] = x
log["reconstruction"] = xrec
if ismap(xc):
log["original_conditioning"] = model.to_rgb(xc)
if hasattr(model, 'cond_stage_key'):
log[model.cond_stage_key] = model.to_rgb(xc)
else:
log["original_conditioning"] = xc if xc is not None else torch.zeros_like(x)
if model.cond_stage_model:
log[model.cond_stage_key] = xc if xc is not None else torch.zeros_like(x)
if model.cond_stage_key == 'class_label':
log[model.cond_stage_key] = xc[model.cond_stage_key]
with model.ema_scope("Plotting"):
t0 = time.time()
sample, intermediates = convsample_ddim(model, c, steps=custom_steps, shape=z.shape,
eta=eta,
quantize_x0=quantize_x0, mask=None, x0=z0,
temperature=temperature, score_corrector=corrector, corrector_kwargs=corrector_kwargs,
x_t=x_T)
t1 = time.time()
if ddim_use_x0_pred:
sample = intermediates['pred_x0'][-1]
x_sample = model.decode_first_stage(sample)
try:
x_sample_noquant = model.decode_first_stage(sample, force_not_quantize=True)
log["sample_noquant"] = x_sample_noquant
log["sample_diff"] = torch.abs(x_sample_noquant - x_sample)
except:
pass
log["sample"] = x_sample
log["time"] = t1 - t0
return log
<|code_end|>
|
[Bug]: x/y Checkpoint plot giving wrong results - wrong ckpt weights loaded from cache if checkpoint cache is enabled in settings (>2)
### Is there an existing issue for this?
- [X] I have searched the existing issues and checked the recent builds/commits
### What happened?
Using the x/y plot with multiple checkpoints and the same seed does not produce consistent results.
It seems that the checkpoint order gets swapped around with each button press.
The currently selected checkpoint also plays a role: it seems its weights get mixed into the plot.
The result changes depending on which checkpoint is currently selected, even though the selected checkpoint should not matter in this case, since only the weights of the x/y plot checkpoints should be used.
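For illustration only (a hedged sketch of a general PyTorch pitfall, not a claim about the exact code path in `sd_models.py`): `state_dict()` returns tensors that alias the module's live parameters, and `dict.copy()` is shallow, so a state dict cached before a model switch can be silently rewritten by the in-place copies that `load_state_dict()` performs afterwards, which would produce exactly this kind of weight mixing.

```python
import collections
import torch

# A single-parameter module stands in for a full checkpoint.
model = torch.nn.Linear(1, 1, bias=False)
with torch.no_grad():
    model.weight.fill_(1.0)          # pretend these are checkpoint A's weights

cache = collections.OrderedDict()
# Shallow copy: the dict is new, but its tensors still alias the live parameters.
cache["checkpoint A"] = model.state_dict().copy()

# "Switch" to checkpoint B: load_state_dict copies values into the parameters in place.
model.load_state_dict({"weight": torch.full((1, 1), 2.0)})

# The cached entry for checkpoint A now reports checkpoint B's weights.
print(cache["checkpoint A"]["weight"].item())  # 2.0 instead of 1.0
```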
### Steps to reproduce the problem
1. in the settings, set _Checkpoints to cache in RAM_ to a value >= 2
2. use two checkpoints in an x/y plot:

3. use a different checkpoint as the currently selected one in the main dropdown

4. generate an image multiple times; the labels stay in the same order, but the images swap with each new button press



### What should have happened?
Results should be consistent.
The currently selected checkpoint should not matter.
### Commit where the problem happens
804d9fb83d0c63ca3acd36378707ce47b8f12599
### What platforms do you use to access UI ?
Windows
### What browsers do you use to access the UI ?
Mozilla Firefox
### Command Line Arguments
```Shell
Launching Web UI with arguments: --xformers --force-enable-xformers
```
### Additional information, context and logs
_Checkpoints to cache in RAM_ needs to be set to >= 2.
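For reference, a minimal sketch of the LRU-style eviction that a `collections.OrderedDict` cache typically uses; treating the effective limit as `limit + 1` so the currently loaded model is counted is an assumption about the intended semantics of the setting, not a description of the existing code:

```python
import collections

cache = collections.OrderedDict()
limit = 2  # the "Checkpoints to cache in RAM" setting

def remember(key, state_dict):
    cache[key] = state_dict
    cache.move_to_end(key)            # most recently used entries live at the end
    while len(cache) > limit + 1:     # assumption: the currently loaded model occupies one slot
        cache.popitem(last=False)     # evict the least recently used entry

for name in ["A", "B", "C", "D"]:
    remember(name, f"state_dict of {name}")

print(list(cache))  # ['B', 'C', 'D'] -- limit=2 plus the slot for the current model
```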
|
modules/sd_models.py
<|code_start|>import collections
import os.path
import sys
import gc
from collections import namedtuple
import torch
import re
from omegaconf import OmegaConf
from ldm.util import instantiate_from_config
from modules import shared, modelloader, devices, script_callbacks, sd_vae
from modules.paths import models_path
from modules.sd_hijack_inpainting import do_inpainting_hijack, should_hijack_inpainting
model_dir = "Stable-diffusion"
model_path = os.path.abspath(os.path.join(models_path, model_dir))
CheckpointInfo = namedtuple("CheckpointInfo", ['filename', 'title', 'hash', 'model_name', 'config'])
checkpoints_list = {}
checkpoints_loaded = collections.OrderedDict()
try:
# this silences the annoying "Some weights of the model checkpoint were not used when initializing..." message at start.
from transformers import logging, CLIPModel
logging.set_verbosity_error()
except Exception:
pass
def setup_model():
if not os.path.exists(model_path):
os.makedirs(model_path)
list_models()
def checkpoint_tiles():
convert = lambda name: int(name) if name.isdigit() else name.lower()
alphanumeric_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]
return sorted([x.title for x in checkpoints_list.values()], key = alphanumeric_key)
def list_models():
checkpoints_list.clear()
model_list = modelloader.load_models(model_path=model_path, command_path=shared.cmd_opts.ckpt_dir, ext_filter=[".ckpt"])
def modeltitle(path, shorthash):
abspath = os.path.abspath(path)
if shared.cmd_opts.ckpt_dir is not None and abspath.startswith(shared.cmd_opts.ckpt_dir):
name = abspath.replace(shared.cmd_opts.ckpt_dir, '')
elif abspath.startswith(model_path):
name = abspath.replace(model_path, '')
else:
name = os.path.basename(path)
if name.startswith("\\") or name.startswith("/"):
name = name[1:]
shortname = os.path.splitext(name.replace("/", "_").replace("\\", "_"))[0]
return f'{name} [{shorthash}]', shortname
cmd_ckpt = shared.cmd_opts.ckpt
if os.path.exists(cmd_ckpt):
h = model_hash(cmd_ckpt)
title, short_model_name = modeltitle(cmd_ckpt, h)
checkpoints_list[title] = CheckpointInfo(cmd_ckpt, title, h, short_model_name, shared.cmd_opts.config)
shared.opts.data['sd_model_checkpoint'] = title
elif cmd_ckpt is not None and cmd_ckpt != shared.default_sd_model_file:
print(f"Checkpoint in --ckpt argument not found (Possible it was moved to {model_path}: {cmd_ckpt}", file=sys.stderr)
for filename in model_list:
h = model_hash(filename)
title, short_model_name = modeltitle(filename, h)
basename, _ = os.path.splitext(filename)
config = basename + ".yaml"
if not os.path.exists(config):
config = shared.cmd_opts.config
checkpoints_list[title] = CheckpointInfo(filename, title, h, short_model_name, config)
def get_closet_checkpoint_match(searchString):
applicable = sorted([info for info in checkpoints_list.values() if searchString in info.title], key = lambda x:len(x.title))
if len(applicable) > 0:
return applicable[0]
return None
def model_hash(filename):
try:
with open(filename, "rb") as file:
import hashlib
m = hashlib.sha256()
file.seek(0x100000)
m.update(file.read(0x10000))
return m.hexdigest()[0:8]
except FileNotFoundError:
return 'NOFILE'
def select_checkpoint():
model_checkpoint = shared.opts.sd_model_checkpoint
checkpoint_info = checkpoints_list.get(model_checkpoint, None)
if checkpoint_info is not None:
return checkpoint_info
if len(checkpoints_list) == 0:
print(f"No checkpoints found. When searching for checkpoints, looked at:", file=sys.stderr)
if shared.cmd_opts.ckpt is not None:
print(f" - file {os.path.abspath(shared.cmd_opts.ckpt)}", file=sys.stderr)
print(f" - directory {model_path}", file=sys.stderr)
if shared.cmd_opts.ckpt_dir is not None:
print(f" - directory {os.path.abspath(shared.cmd_opts.ckpt_dir)}", file=sys.stderr)
print(f"Can't run without a checkpoint. Find and place a .ckpt file into any of those locations. The program will exit.", file=sys.stderr)
exit(1)
checkpoint_info = next(iter(checkpoints_list.values()))
if model_checkpoint is not None:
print(f"Checkpoint {model_checkpoint} not found; loading fallback {checkpoint_info.title}", file=sys.stderr)
return checkpoint_info
chckpoint_dict_replacements = {
'cond_stage_model.transformer.embeddings.': 'cond_stage_model.transformer.text_model.embeddings.',
'cond_stage_model.transformer.encoder.': 'cond_stage_model.transformer.text_model.encoder.',
'cond_stage_model.transformer.final_layer_norm.': 'cond_stage_model.transformer.text_model.final_layer_norm.',
}
def transform_checkpoint_dict_key(k):
for text, replacement in chckpoint_dict_replacements.items():
if k.startswith(text):
k = replacement + k[len(text):]
return k
def get_state_dict_from_checkpoint(pl_sd):
if "state_dict" in pl_sd:
pl_sd = pl_sd["state_dict"]
sd = {}
for k, v in pl_sd.items():
new_key = transform_checkpoint_dict_key(k)
if new_key is not None:
sd[new_key] = v
pl_sd.clear()
pl_sd.update(sd)
return pl_sd
def load_model_weights(model, checkpoint_info, vae_file="auto"):
checkpoint_file = checkpoint_info.filename
sd_model_hash = checkpoint_info.hash
if shared.opts.sd_checkpoint_cache > 0 and hasattr(model, "sd_checkpoint_info"):
sd_vae.restore_base_vae(model)
checkpoints_loaded[model.sd_checkpoint_info] = model.state_dict().copy()
vae_file = sd_vae.resolve_vae(checkpoint_file, vae_file=vae_file)
if checkpoint_info not in checkpoints_loaded:
print(f"Loading weights [{sd_model_hash}] from {checkpoint_file}")
pl_sd = torch.load(checkpoint_file, map_location=shared.weight_load_location)
if "global_step" in pl_sd:
print(f"Global Step: {pl_sd['global_step']}")
sd = get_state_dict_from_checkpoint(pl_sd)
del pl_sd
model.load_state_dict(sd, strict=False)
del sd
if shared.cmd_opts.opt_channelslast:
model.to(memory_format=torch.channels_last)
if not shared.cmd_opts.no_half:
vae = model.first_stage_model
# with --no-half-vae, remove VAE from model when doing half() to prevent its weights from being converted to float16
if shared.cmd_opts.no_half_vae:
model.first_stage_model = None
model.half()
model.first_stage_model = vae
devices.dtype = torch.float32 if shared.cmd_opts.no_half else torch.float16
devices.dtype_vae = torch.float32 if shared.cmd_opts.no_half or shared.cmd_opts.no_half_vae else torch.float16
model.first_stage_model.to(devices.dtype_vae)
else:
vae_name = sd_vae.get_filename(vae_file) if vae_file else None
vae_message = f" with {vae_name} VAE" if vae_name else ""
print(f"Loading weights [{sd_model_hash}]{vae_message} from cache")
model.load_state_dict(checkpoints_loaded[checkpoint_info])
if shared.opts.sd_checkpoint_cache > 0:
while len(checkpoints_loaded) > shared.opts.sd_checkpoint_cache:
checkpoints_loaded.popitem(last=False) # LRU
model.sd_model_hash = sd_model_hash
model.sd_model_checkpoint = checkpoint_file
model.sd_checkpoint_info = checkpoint_info
sd_vae.load_vae(model, vae_file)
def load_model(checkpoint_info=None):
from modules import lowvram, sd_hijack
checkpoint_info = checkpoint_info or select_checkpoint()
if checkpoint_info.config != shared.cmd_opts.config:
print(f"Loading config from: {checkpoint_info.config}")
if shared.sd_model:
sd_hijack.model_hijack.undo_hijack(shared.sd_model)
shared.sd_model = None
gc.collect()
devices.torch_gc()
sd_config = OmegaConf.load(checkpoint_info.config)
if should_hijack_inpainting(checkpoint_info):
# Hardcoded config for now...
sd_config.model.target = "ldm.models.diffusion.ddpm.LatentInpaintDiffusion"
sd_config.model.params.use_ema = False
sd_config.model.params.conditioning_key = "hybrid"
sd_config.model.params.unet_config.params.in_channels = 9
# Create a "fake" config with a different name so that we know to unload it when switching models.
checkpoint_info = checkpoint_info._replace(config=checkpoint_info.config.replace(".yaml", "-inpainting.yaml"))
do_inpainting_hijack()
sd_model = instantiate_from_config(sd_config.model)
load_model_weights(sd_model, checkpoint_info)
if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
lowvram.setup_for_low_vram(sd_model, shared.cmd_opts.medvram)
else:
sd_model.to(shared.device)
sd_hijack.model_hijack.hijack(sd_model)
sd_model.eval()
shared.sd_model = sd_model
script_callbacks.model_loaded_callback(sd_model)
print(f"Model loaded.")
return sd_model
def reload_model_weights(sd_model=None, info=None):
from modules import lowvram, devices, sd_hijack
checkpoint_info = info or select_checkpoint()
if not sd_model:
sd_model = shared.sd_model
if sd_model.sd_model_checkpoint == checkpoint_info.filename:
return
if sd_model.sd_checkpoint_info.config != checkpoint_info.config or should_hijack_inpainting(checkpoint_info) != should_hijack_inpainting(sd_model.sd_checkpoint_info):
del sd_model
checkpoints_loaded.clear()
load_model(checkpoint_info)
return shared.sd_model
if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
lowvram.send_everything_to_cpu()
else:
sd_model.to(devices.cpu)
sd_hijack.model_hijack.undo_hijack(sd_model)
load_model_weights(sd_model, checkpoint_info)
sd_hijack.model_hijack.hijack(sd_model)
script_callbacks.model_loaded_callback(sd_model)
if not shared.cmd_opts.lowvram and not shared.cmd_opts.medvram:
sd_model.to(devices.device)
print(f"Weights loaded.")
return sd_model
<|code_end|>
|
modules/sd_models.py
<|code_start|>import collections
import os.path
import sys
import gc
from collections import namedtuple
import torch
import re
from omegaconf import OmegaConf
from ldm.util import instantiate_from_config
from modules import shared, modelloader, devices, script_callbacks, sd_vae
from modules.paths import models_path
from modules.sd_hijack_inpainting import do_inpainting_hijack, should_hijack_inpainting
model_dir = "Stable-diffusion"
model_path = os.path.abspath(os.path.join(models_path, model_dir))
CheckpointInfo = namedtuple("CheckpointInfo", ['filename', 'title', 'hash', 'model_name', 'config'])
checkpoints_list = {}
checkpoints_loaded = collections.OrderedDict()
try:
# this silences the annoying "Some weights of the model checkpoint were not used when initializing..." message at start.
from transformers import logging, CLIPModel
logging.set_verbosity_error()
except Exception:
pass
def setup_model():
if not os.path.exists(model_path):
os.makedirs(model_path)
list_models()
def checkpoint_tiles():
convert = lambda name: int(name) if name.isdigit() else name.lower()
alphanumeric_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]
return sorted([x.title for x in checkpoints_list.values()], key = alphanumeric_key)
def list_models():
checkpoints_list.clear()
model_list = modelloader.load_models(model_path=model_path, command_path=shared.cmd_opts.ckpt_dir, ext_filter=[".ckpt"])
def modeltitle(path, shorthash):
abspath = os.path.abspath(path)
if shared.cmd_opts.ckpt_dir is not None and abspath.startswith(shared.cmd_opts.ckpt_dir):
name = abspath.replace(shared.cmd_opts.ckpt_dir, '')
elif abspath.startswith(model_path):
name = abspath.replace(model_path, '')
else:
name = os.path.basename(path)
if name.startswith("\\") or name.startswith("/"):
name = name[1:]
shortname = os.path.splitext(name.replace("/", "_").replace("\\", "_"))[0]
return f'{name} [{shorthash}]', shortname
cmd_ckpt = shared.cmd_opts.ckpt
if os.path.exists(cmd_ckpt):
h = model_hash(cmd_ckpt)
title, short_model_name = modeltitle(cmd_ckpt, h)
checkpoints_list[title] = CheckpointInfo(cmd_ckpt, title, h, short_model_name, shared.cmd_opts.config)
shared.opts.data['sd_model_checkpoint'] = title
elif cmd_ckpt is not None and cmd_ckpt != shared.default_sd_model_file:
print(f"Checkpoint in --ckpt argument not found (Possible it was moved to {model_path}: {cmd_ckpt}", file=sys.stderr)
for filename in model_list:
h = model_hash(filename)
title, short_model_name = modeltitle(filename, h)
basename, _ = os.path.splitext(filename)
config = basename + ".yaml"
if not os.path.exists(config):
config = shared.cmd_opts.config
checkpoints_list[title] = CheckpointInfo(filename, title, h, short_model_name, config)
def get_closet_checkpoint_match(searchString):
applicable = sorted([info for info in checkpoints_list.values() if searchString in info.title], key = lambda x:len(x.title))
if len(applicable) > 0:
return applicable[0]
return None
def model_hash(filename):
try:
with open(filename, "rb") as file:
import hashlib
m = hashlib.sha256()
file.seek(0x100000)
m.update(file.read(0x10000))
return m.hexdigest()[0:8]
except FileNotFoundError:
return 'NOFILE'
def select_checkpoint():
model_checkpoint = shared.opts.sd_model_checkpoint
checkpoint_info = checkpoints_list.get(model_checkpoint, None)
if checkpoint_info is not None:
return checkpoint_info
if len(checkpoints_list) == 0:
print(f"No checkpoints found. When searching for checkpoints, looked at:", file=sys.stderr)
if shared.cmd_opts.ckpt is not None:
print(f" - file {os.path.abspath(shared.cmd_opts.ckpt)}", file=sys.stderr)
print(f" - directory {model_path}", file=sys.stderr)
if shared.cmd_opts.ckpt_dir is not None:
print(f" - directory {os.path.abspath(shared.cmd_opts.ckpt_dir)}", file=sys.stderr)
print(f"Can't run without a checkpoint. Find and place a .ckpt file into any of those locations. The program will exit.", file=sys.stderr)
exit(1)
checkpoint_info = next(iter(checkpoints_list.values()))
if model_checkpoint is not None:
print(f"Checkpoint {model_checkpoint} not found; loading fallback {checkpoint_info.title}", file=sys.stderr)
return checkpoint_info
chckpoint_dict_replacements = {
'cond_stage_model.transformer.embeddings.': 'cond_stage_model.transformer.text_model.embeddings.',
'cond_stage_model.transformer.encoder.': 'cond_stage_model.transformer.text_model.encoder.',
'cond_stage_model.transformer.final_layer_norm.': 'cond_stage_model.transformer.text_model.final_layer_norm.',
}
def transform_checkpoint_dict_key(k):
for text, replacement in chckpoint_dict_replacements.items():
if k.startswith(text):
k = replacement + k[len(text):]
return k
def get_state_dict_from_checkpoint(pl_sd):
if "state_dict" in pl_sd:
pl_sd = pl_sd["state_dict"]
sd = {}
for k, v in pl_sd.items():
new_key = transform_checkpoint_dict_key(k)
if new_key is not None:
sd[new_key] = v
pl_sd.clear()
pl_sd.update(sd)
return pl_sd
def load_model_weights(model, checkpoint_info, vae_file="auto"):
checkpoint_file = checkpoint_info.filename
sd_model_hash = checkpoint_info.hash
cache_enabled = shared.opts.sd_checkpoint_cache > 0
if cache_enabled:
sd_vae.restore_base_vae(model)
vae_file = sd_vae.resolve_vae(checkpoint_file, vae_file=vae_file)
if cache_enabled and checkpoint_info in checkpoints_loaded:
# use checkpoint cache
vae_name = sd_vae.get_filename(vae_file) if vae_file else None
vae_message = f" with {vae_name} VAE" if vae_name else ""
print(f"Loading weights [{sd_model_hash}]{vae_message} from cache")
model.load_state_dict(checkpoints_loaded[checkpoint_info])
else:
# load from file
print(f"Loading weights [{sd_model_hash}] from {checkpoint_file}")
pl_sd = torch.load(checkpoint_file, map_location=shared.weight_load_location)
if "global_step" in pl_sd:
print(f"Global Step: {pl_sd['global_step']}")
sd = get_state_dict_from_checkpoint(pl_sd)
del pl_sd
model.load_state_dict(sd, strict=False)
del sd
if cache_enabled:
# cache newly loaded model
checkpoints_loaded[checkpoint_info] = model.state_dict().copy()
if shared.cmd_opts.opt_channelslast:
model.to(memory_format=torch.channels_last)
if not shared.cmd_opts.no_half:
vae = model.first_stage_model
# with --no-half-vae, remove VAE from model when doing half() to prevent its weights from being converted to float16
if shared.cmd_opts.no_half_vae:
model.first_stage_model = None
model.half()
model.first_stage_model = vae
devices.dtype = torch.float32 if shared.cmd_opts.no_half else torch.float16
devices.dtype_vae = torch.float32 if shared.cmd_opts.no_half or shared.cmd_opts.no_half_vae else torch.float16
model.first_stage_model.to(devices.dtype_vae)
# clean up cache if limit is reached
if cache_enabled:
while len(checkpoints_loaded) > shared.opts.sd_checkpoint_cache + 1: # we need to count the current model
checkpoints_loaded.popitem(last=False) # LRU
model.sd_model_hash = sd_model_hash
model.sd_model_checkpoint = checkpoint_file
model.sd_checkpoint_info = checkpoint_info
sd_vae.load_vae(model, vae_file)
def load_model(checkpoint_info=None):
from modules import lowvram, sd_hijack
checkpoint_info = checkpoint_info or select_checkpoint()
if checkpoint_info.config != shared.cmd_opts.config:
print(f"Loading config from: {checkpoint_info.config}")
if shared.sd_model:
sd_hijack.model_hijack.undo_hijack(shared.sd_model)
shared.sd_model = None
gc.collect()
devices.torch_gc()
sd_config = OmegaConf.load(checkpoint_info.config)
if should_hijack_inpainting(checkpoint_info):
# Hardcoded config for now...
sd_config.model.target = "ldm.models.diffusion.ddpm.LatentInpaintDiffusion"
sd_config.model.params.use_ema = False
sd_config.model.params.conditioning_key = "hybrid"
sd_config.model.params.unet_config.params.in_channels = 9
# Create a "fake" config with a different name so that we know to unload it when switching models.
checkpoint_info = checkpoint_info._replace(config=checkpoint_info.config.replace(".yaml", "-inpainting.yaml"))
do_inpainting_hijack()
sd_model = instantiate_from_config(sd_config.model)
load_model_weights(sd_model, checkpoint_info)
if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
lowvram.setup_for_low_vram(sd_model, shared.cmd_opts.medvram)
else:
sd_model.to(shared.device)
sd_hijack.model_hijack.hijack(sd_model)
sd_model.eval()
shared.sd_model = sd_model
script_callbacks.model_loaded_callback(sd_model)
print(f"Model loaded.")
return sd_model
def reload_model_weights(sd_model=None, info=None):
from modules import lowvram, devices, sd_hijack
checkpoint_info = info or select_checkpoint()
if not sd_model:
sd_model = shared.sd_model
if sd_model.sd_model_checkpoint == checkpoint_info.filename:
return
if sd_model.sd_checkpoint_info.config != checkpoint_info.config or should_hijack_inpainting(checkpoint_info) != should_hijack_inpainting(sd_model.sd_checkpoint_info):
del sd_model
checkpoints_loaded.clear()
load_model(checkpoint_info)
return shared.sd_model
if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
lowvram.send_everything_to_cpu()
else:
sd_model.to(devices.cpu)
sd_hijack.model_hijack.undo_hijack(sd_model)
load_model_weights(sd_model, checkpoint_info)
sd_hijack.model_hijack.hijack(sd_model)
script_callbacks.model_loaded_callback(sd_model)
if not shared.cmd_opts.lowvram and not shared.cmd_opts.medvram:
sd_model.to(devices.device)
print(f"Weights loaded.")
return sd_model
<|code_end|>
|
[Bug]: Cannot switch checkpoints
### Is there an existing issue for this?
- [X] I have searched the existing issues and checked the recent builds/commits
### What happened?
Since updating today, I can no longer change the checkpoint in the WebUI. This is the error message I get:
LatentDiffusion: Running in eps-prediction mode
Traceback (most recent call last):
File "E:\sd\stable-diffusion-webui\venv\lib\site-packages\gradio\routes.py", line 284, in run_predict
output = await app.blocks.process_api(
File "E:\sd\stable-diffusion-webui\venv\lib\site-packages\gradio\blocks.py", line 982, in process_api
result = await self.call_function(fn_index, inputs, iterator)
File "E:\sd\stable-diffusion-webui\venv\lib\site-packages\gradio\blocks.py", line 824, in call_function
prediction = await anyio.to_thread.run_sync(
File "E:\sd\stable-diffusion-webui\venv\lib\site-packages\anyio\to_thread.py", line 31, in run_sync
return await get_asynclib().run_sync_in_worker_thread(
File "E:\sd\stable-diffusion-webui\venv\lib\site-packages\anyio\_backends\_asyncio.py", line 937, in run_sync_in_worker_thread
return await future
File "E:\sd\stable-diffusion-webui\venv\lib\site-packages\anyio\_backends\_asyncio.py", line 867, in run
result = context.run(func, *args)
File "E:\sd\stable-diffusion-webui\modules\ui.py", line 443, in update_token_counter
tokens, token_count, max_length = max([model_hijack.tokenize(prompt) for prompt in prompts], key=lambda args: args[1])
File "E:\sd\stable-diffusion-webui\modules\ui.py", line 443, in <listcomp>
tokens, token_count, max_length = max([model_hijack.tokenize(prompt) for prompt in prompts], key=lambda args: args[1])
File "E:\sd\stable-diffusion-webui\modules\sd_hijack.py", line 116, in tokenize
_, remade_batch_tokens, _, _, _, token_count = self.clip.process_text([text])
AttributeError: 'NoneType' object has no attribute 'process_text'
DiffusionWrapper has 859.52 M params.
making attention of type 'vanilla' with 512 in_channels
Working with z of shape (1, 4, 32, 32) = 4096 dimensions.
making attention of type 'vanilla' with 512 in_channels
Traceback (most recent call last):
File "E:\sd\stable-diffusion-webui\venv\lib\site-packages\gradio\routes.py", line 284, in run_predict
output = await app.blocks.process_api(
File "E:\sd\stable-diffusion-webui\venv\lib\site-packages\gradio\blocks.py", line 982, in process_api
result = await self.call_function(fn_index, inputs, iterator)
File "E:\sd\stable-diffusion-webui\venv\lib\site-packages\gradio\blocks.py", line 824, in call_function
prediction = await anyio.to_thread.run_sync(
File "E:\sd\stable-diffusion-webui\venv\lib\site-packages\anyio\to_thread.py", line 31, in run_sync
return await get_asynclib().run_sync_in_worker_thread(
File "E:\sd\stable-diffusion-webui\venv\lib\site-packages\anyio\_backends\_asyncio.py", line 937, in run_sync_in_worker_thread
return await future
File "E:\sd\stable-diffusion-webui\venv\lib\site-packages\anyio\_backends\_asyncio.py", line 867, in run
result = context.run(func, *args)
File "E:\sd\stable-diffusion-webui\modules\ui.py", line 1662, in <lambda>
fn=lambda value, k=k: run_settings_single(value, key=k),
File "E:\sd\stable-diffusion-webui\modules\ui.py", line 1504, in run_settings_single
opts.data_labels[key].onchange()
File "E:\sd\stable-diffusion-webui\webui.py", line 41, in f
res = func(*args, **kwargs)
File "E:\sd\stable-diffusion-webui\webui.py", line 83, in <lambda>
shared.opts.onchange("sd_model_checkpoint", wrap_queued_call(lambda: modules.sd_models.reload_model_weights()))
File "E:\sd\stable-diffusion-webui\modules\sd_models.py", line 285, in reload_model_weights
load_model(checkpoint_info)
File "E:\sd\stable-diffusion-webui\modules\sd_models.py", line 254, in load_model
load_model_weights(sd_model, checkpoint_info)
File "E:\sd\stable-diffusion-webui\modules\sd_models.py", line 169, in load_model_weights
sd_vae.restore_base_vae(model)
File "E:\sd\stable-diffusion-webui\modules\sd_vae.py", line 54, in restore_base_vae
if base_vae is not None and checkpoint_info == model.sd_checkpoint_info:
File "E:\sd\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1207, in __getattr__
raise AttributeError("'{}' object has no attribute '{}'".format(
AttributeError: 'LatentDiffusion' object has no attribute 'sd_checkpoint_info'
Then, I cannot generate any images. I get this error message:
Error completing request
Arguments: ('ppp', '', 'None', 'None', 36, 2, False, False, 1, 6, 7, -1.0, -1.0, 0, 0, 0, False, 512, 512, False, 0.7, 0, 0, 0, 0.9, 5, '0.0001', False, 'None', '', 0.1, False, '<div id="dynamic-prompting">\n <h3><strong>Combinations</strong></h3>\n\n Choose a number of terms from a list, in this case we choose two artists: \n <code class="codeblock">{2$$artist1|artist2|artist3}</code><br/>\n\n If $$ is not provided, then 1$$ is assumed.<br/><br/>\n\n If the chosen number of terms is greater than the available terms, then some terms will be duplicated, otherwise chosen terms will be unique. This is useful in the case of wildcards, e.g.\n <code class="codeblock">{2$$__artist__}</code> is equivalent to <code class="codeblock">{2$$__artist__|__artist__}</code><br/><br/>\n\n A range can be provided:\n <code class="codeblock">{1-3$$artist1|artist2|artist3}</code><br/>\n In this case, a random number of artists between 1 and 3 is chosen.<br/><br/>\n\n Wildcards can be used and the joiner can also be specified:\n <code class="codeblock">{{1-$$and$$__adjective__}}</code><br/>\n\n Here, a random number between 1 and 3 words from adjective.txt will be chosen and joined together with the word \'and\' instead of the default comma.\n\n <br/><br/>\n\n <h3><strong>Wildcards</strong></h3>\n \n\n <br/>\n If the groups wont drop down click <strong onclick="check_collapsibles()" style="cursor: pointer">here</strong> to fix the issue.\n\n <br/><br/>\n\n <code class="codeblock">WILDCARD_DIR: E:\\sd\\stable-diffusion-webui\\extensions\\sd-dynamic-prompts\\wildcards</code><br/>\n <small onload="check_collapsibles()">You can add more wildcards by creating a text file with one term per line and name is mywildcards.txt. Place it in E:\\sd\\stable-diffusion-webui\\extensions\\sd-dynamic-prompts\\wildcards. <code class="codeblock">__<folder>/mywildcards__</code> will then become available.</small>\n</div>\n\n', False, 1, False, 100, 0.7, False, False, 'Not set', 'Not set', 'Not set', 'Not set', 'No focus', 'None', False, False, False, '', 1, '', 0, '', True, False, False, '', 'None', 30, 4, 0, 0, False, 'None', '<br>', 'None', 30, 4, 0, 0, 4, 0.4, True, 32, 1.0, 2.0, 'a painting in', 'style', 'picture frame, portrait photo', None) {}
Traceback (most recent call last):
File "E:\sd\stable-diffusion-webui\venv\lib\site-packages\gradio\routes.py", line 284, in run_predict
output = await app.blocks.process_api(
File "E:\sd\stable-diffusion-webui\venv\lib\site-packages\gradio\blocks.py", line 982, in process_api
result = await self.call_function(fn_index, inputs, iterator)
File "E:\sd\stable-diffusion-webui\venv\lib\site-packages\gradio\blocks.py", line 824, in call_function
prediction = await anyio.to_thread.run_sync(
File "E:\sd\stable-diffusion-webui\venv\lib\site-packages\anyio\to_thread.py", line 31, in run_sync
return await get_asynclib().run_sync_in_worker_thread(
File "E:\sd\stable-diffusion-webui\venv\lib\site-packages\anyio\_backends\_asyncio.py", line 937, in run_sync_in_worker_thread
return await future
File "E:\sd\stable-diffusion-webui\venv\lib\site-packages\anyio\_backends\_asyncio.py", line 867, in run
result = context.run(func, *args)
File "E:\sd\stable-diffusion-webui\modules\ui.py", line 443, in update_token_counter
tokens, token_count, max_length = max([model_hijack.tokenize(prompt) for prompt in prompts], key=lambda args: args[1])
File "E:\sd\stable-diffusion-webui\modules\ui.py", line 443, in <listcomp>
tokens, token_count, max_length = max([model_hijack.tokenize(prompt) for prompt in prompts], key=lambda args: args[1])
File "E:\sd\stable-diffusion-webui\modules\sd_hijack.py", line 116, in tokenize
_, remade_batch_tokens, _, _, _, token_count = self.clip.process_text([text])
AttributeError: 'NoneType' object has no attribute 'process_text'
Traceback (most recent call last):
File "E:\sd\stable-diffusion-webui\modules\ui.py", line 185, in f
res = list(func(*args, **kwargs))
File "E:\sd\stable-diffusion-webui\webui.py", line 54, in f
res = func(*args, **kwargs)
File "E:\sd\stable-diffusion-webui\modules\txt2img.py", line 48, in txt2img
processed = process_images(p)
File "E:\sd\stable-diffusion-webui\modules\processing.py", line 423, in process_images
res = process_images_inner(p)
File "E:\sd\stable-diffusion-webui\modules\processing.py", line 441, in process_images_inner
processed = Processed(p, [], p.seed, "")
File "E:\sd\stable-diffusion-webui\modules\processing.py", line 220, in __init__
self.sd_model_hash = shared.sd_model.sd_model_hash
AttributeError: 'NoneType' object has no attribute 'sd_model_hash'
### Steps to reproduce the problem
1. Change the checkpoint in the dropdown at the top of the WebUI.
I had the inpainting model loaded last, and now I cannot switch to any other checkpoint. After that, I cannot generate images either.
### What should have happened?
It should have switched to the selected checkpoint normally.
### Commit where the problem happens
98947d173e3f1667eba29c904f681047dea9de90
### What platforms do you use to access UI ?
Windows
### What browsers do you use to access the UI ?
Microsoft Edge
### Command Line Arguments
```Shell
--precision full --medvram --no-half --ckpt-dir "C:\SD\models" --listen --enable-insecure-extension-access --xformers --vae-path "C:\SD\moremodels\v1-5-pruned-emaonly.vae.pt" --api --cors-allow-origins=*
```
### Additional information, context and logs
It seems to be related to this PR: #4514
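A hedged sketch of the failure mode visible in the traceback: `torch.nn.Module.__getattr__` raises `AttributeError` for attributes that were never assigned, so reading `model.sd_checkpoint_info` on a freshly instantiated model (before any weights have been loaded and the attribute has been set) fails. The guard shown here is only an illustration, not the project's actual fix:

```python
import torch

class LatentDiffusionStub(torch.nn.Module):
    """Bare stand-in for the real LatentDiffusion class used by the webui."""
    pass

model = LatentDiffusionStub()

# Reading an attribute that was never assigned goes through Module.__getattr__,
# which raises AttributeError -- the same error as in the traceback above.
try:
    _ = model.sd_checkpoint_info
except AttributeError as err:
    print(f"fresh model: {err}")

# A guard avoids touching any restore/cache logic before the first load has run.
if hasattr(model, "sd_checkpoint_info"):
    print("safe to compare against", model.sd_checkpoint_info)
else:
    print("first load: nothing to restore yet")
```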
|
modules/sd_models.py
<|code_start|>import collections
import os.path
import sys
import gc
from collections import namedtuple
import torch
import re
from omegaconf import OmegaConf
from ldm.util import instantiate_from_config
from modules import shared, modelloader, devices, script_callbacks, sd_vae
from modules.paths import models_path
from modules.sd_hijack_inpainting import do_inpainting_hijack, should_hijack_inpainting
model_dir = "Stable-diffusion"
model_path = os.path.abspath(os.path.join(models_path, model_dir))
CheckpointInfo = namedtuple("CheckpointInfo", ['filename', 'title', 'hash', 'model_name', 'config'])
checkpoints_list = {}
checkpoints_loaded = collections.OrderedDict()
try:
# this silences the annoying "Some weights of the model checkpoint were not used when initializing..." message at start.
from transformers import logging, CLIPModel
logging.set_verbosity_error()
except Exception:
pass
def setup_model():
if not os.path.exists(model_path):
os.makedirs(model_path)
list_models()
def checkpoint_tiles():
convert = lambda name: int(name) if name.isdigit() else name.lower()
alphanumeric_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]
return sorted([x.title for x in checkpoints_list.values()], key = alphanumeric_key)
def list_models():
checkpoints_list.clear()
model_list = modelloader.load_models(model_path=model_path, command_path=shared.cmd_opts.ckpt_dir, ext_filter=[".ckpt"])
def modeltitle(path, shorthash):
abspath = os.path.abspath(path)
if shared.cmd_opts.ckpt_dir is not None and abspath.startswith(shared.cmd_opts.ckpt_dir):
name = abspath.replace(shared.cmd_opts.ckpt_dir, '')
elif abspath.startswith(model_path):
name = abspath.replace(model_path, '')
else:
name = os.path.basename(path)
if name.startswith("\\") or name.startswith("/"):
name = name[1:]
shortname = os.path.splitext(name.replace("/", "_").replace("\\", "_"))[0]
return f'{name} [{shorthash}]', shortname
cmd_ckpt = shared.cmd_opts.ckpt
if os.path.exists(cmd_ckpt):
h = model_hash(cmd_ckpt)
title, short_model_name = modeltitle(cmd_ckpt, h)
checkpoints_list[title] = CheckpointInfo(cmd_ckpt, title, h, short_model_name, shared.cmd_opts.config)
shared.opts.data['sd_model_checkpoint'] = title
elif cmd_ckpt is not None and cmd_ckpt != shared.default_sd_model_file:
print(f"Checkpoint in --ckpt argument not found (Possible it was moved to {model_path}: {cmd_ckpt}", file=sys.stderr)
for filename in model_list:
h = model_hash(filename)
title, short_model_name = modeltitle(filename, h)
basename, _ = os.path.splitext(filename)
config = basename + ".yaml"
if not os.path.exists(config):
config = shared.cmd_opts.config
checkpoints_list[title] = CheckpointInfo(filename, title, h, short_model_name, config)
def get_closet_checkpoint_match(searchString):
applicable = sorted([info for info in checkpoints_list.values() if searchString in info.title], key = lambda x:len(x.title))
if len(applicable) > 0:
return applicable[0]
return None
def model_hash(filename):
try:
with open(filename, "rb") as file:
import hashlib
m = hashlib.sha256()
file.seek(0x100000)
m.update(file.read(0x10000))
return m.hexdigest()[0:8]
except FileNotFoundError:
return 'NOFILE'
def select_checkpoint():
model_checkpoint = shared.opts.sd_model_checkpoint
checkpoint_info = checkpoints_list.get(model_checkpoint, None)
if checkpoint_info is not None:
return checkpoint_info
if len(checkpoints_list) == 0:
print(f"No checkpoints found. When searching for checkpoints, looked at:", file=sys.stderr)
if shared.cmd_opts.ckpt is not None:
print(f" - file {os.path.abspath(shared.cmd_opts.ckpt)}", file=sys.stderr)
print(f" - directory {model_path}", file=sys.stderr)
if shared.cmd_opts.ckpt_dir is not None:
print(f" - directory {os.path.abspath(shared.cmd_opts.ckpt_dir)}", file=sys.stderr)
print(f"Can't run without a checkpoint. Find and place a .ckpt file into any of those locations. The program will exit.", file=sys.stderr)
exit(1)
checkpoint_info = next(iter(checkpoints_list.values()))
if model_checkpoint is not None:
print(f"Checkpoint {model_checkpoint} not found; loading fallback {checkpoint_info.title}", file=sys.stderr)
return checkpoint_info
chckpoint_dict_replacements = {
'cond_stage_model.transformer.embeddings.': 'cond_stage_model.transformer.text_model.embeddings.',
'cond_stage_model.transformer.encoder.': 'cond_stage_model.transformer.text_model.encoder.',
'cond_stage_model.transformer.final_layer_norm.': 'cond_stage_model.transformer.text_model.final_layer_norm.',
}
def transform_checkpoint_dict_key(k):
for text, replacement in chckpoint_dict_replacements.items():
if k.startswith(text):
k = replacement + k[len(text):]
return k
def get_state_dict_from_checkpoint(pl_sd):
if "state_dict" in pl_sd:
pl_sd = pl_sd["state_dict"]
sd = {}
for k, v in pl_sd.items():
new_key = transform_checkpoint_dict_key(k)
if new_key is not None:
sd[new_key] = v
pl_sd.clear()
pl_sd.update(sd)
return pl_sd
def load_model_weights(model, checkpoint_info, vae_file="auto"):
checkpoint_file = checkpoint_info.filename
sd_model_hash = checkpoint_info.hash
cache_enabled = shared.opts.sd_checkpoint_cache > 0
if cache_enabled:
sd_vae.restore_base_vae(model)
vae_file = sd_vae.resolve_vae(checkpoint_file, vae_file=vae_file)
if cache_enabled and checkpoint_info in checkpoints_loaded:
# use checkpoint cache
vae_name = sd_vae.get_filename(vae_file) if vae_file else None
vae_message = f" with {vae_name} VAE" if vae_name else ""
print(f"Loading weights [{sd_model_hash}]{vae_message} from cache")
model.load_state_dict(checkpoints_loaded[checkpoint_info])
else:
# load from file
print(f"Loading weights [{sd_model_hash}] from {checkpoint_file}")
pl_sd = torch.load(checkpoint_file, map_location=shared.weight_load_location)
if "global_step" in pl_sd:
print(f"Global Step: {pl_sd['global_step']}")
sd = get_state_dict_from_checkpoint(pl_sd)
del pl_sd
model.load_state_dict(sd, strict=False)
del sd
if cache_enabled:
# cache newly loaded model
checkpoints_loaded[checkpoint_info] = model.state_dict().copy()
if shared.cmd_opts.opt_channelslast:
model.to(memory_format=torch.channels_last)
if not shared.cmd_opts.no_half:
vae = model.first_stage_model
# with --no-half-vae, remove VAE from model when doing half() to prevent its weights from being converted to float16
if shared.cmd_opts.no_half_vae:
model.first_stage_model = None
model.half()
model.first_stage_model = vae
devices.dtype = torch.float32 if shared.cmd_opts.no_half else torch.float16
devices.dtype_vae = torch.float32 if shared.cmd_opts.no_half or shared.cmd_opts.no_half_vae else torch.float16
model.first_stage_model.to(devices.dtype_vae)
# clean up cache if limit is reached
if cache_enabled:
while len(checkpoints_loaded) > shared.opts.sd_checkpoint_cache + 1: # we need to count the current model
checkpoints_loaded.popitem(last=False) # LRU
model.sd_model_hash = sd_model_hash
model.sd_model_checkpoint = checkpoint_file
model.sd_checkpoint_info = checkpoint_info
sd_vae.load_vae(model, vae_file)
def load_model(checkpoint_info=None):
from modules import lowvram, sd_hijack
checkpoint_info = checkpoint_info or select_checkpoint()
if checkpoint_info.config != shared.cmd_opts.config:
print(f"Loading config from: {checkpoint_info.config}")
if shared.sd_model:
sd_hijack.model_hijack.undo_hijack(shared.sd_model)
shared.sd_model = None
gc.collect()
devices.torch_gc()
sd_config = OmegaConf.load(checkpoint_info.config)
if should_hijack_inpainting(checkpoint_info):
# Hardcoded config for now...
sd_config.model.target = "ldm.models.diffusion.ddpm.LatentInpaintDiffusion"
sd_config.model.params.use_ema = False
sd_config.model.params.conditioning_key = "hybrid"
sd_config.model.params.unet_config.params.in_channels = 9
# Create a "fake" config with a different name so that we know to unload it when switching models.
checkpoint_info = checkpoint_info._replace(config=checkpoint_info.config.replace(".yaml", "-inpainting.yaml"))
do_inpainting_hijack()
sd_model = instantiate_from_config(sd_config.model)
load_model_weights(sd_model, checkpoint_info)
if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
lowvram.setup_for_low_vram(sd_model, shared.cmd_opts.medvram)
else:
sd_model.to(shared.device)
sd_hijack.model_hijack.hijack(sd_model)
sd_model.eval()
shared.sd_model = sd_model
script_callbacks.model_loaded_callback(sd_model)
print(f"Model loaded.")
return sd_model
def reload_model_weights(sd_model=None, info=None):
from modules import lowvram, devices, sd_hijack
checkpoint_info = info or select_checkpoint()
if not sd_model:
sd_model = shared.sd_model
if sd_model.sd_model_checkpoint == checkpoint_info.filename:
return
if sd_model.sd_checkpoint_info.config != checkpoint_info.config or should_hijack_inpainting(checkpoint_info) != should_hijack_inpainting(sd_model.sd_checkpoint_info):
del sd_model
checkpoints_loaded.clear()
load_model(checkpoint_info)
return shared.sd_model
if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
lowvram.send_everything_to_cpu()
else:
sd_model.to(devices.cpu)
sd_hijack.model_hijack.undo_hijack(sd_model)
load_model_weights(sd_model, checkpoint_info)
sd_hijack.model_hijack.hijack(sd_model)
script_callbacks.model_loaded_callback(sd_model)
if not shared.cmd_opts.lowvram and not shared.cmd_opts.medvram:
sd_model.to(devices.device)
print(f"Weights loaded.")
return sd_model
<|code_end|>
modules/sd_vae.py
<|code_start|>import torch
import os
from collections import namedtuple
from modules import shared, devices, script_callbacks
from modules.paths import models_path
import glob
model_dir = "Stable-diffusion"
model_path = os.path.abspath(os.path.join(models_path, model_dir))
vae_dir = "VAE"
vae_path = os.path.abspath(os.path.join(models_path, vae_dir))
vae_ignore_keys = {"model_ema.decay", "model_ema.num_updates"}
default_vae_dict = {"auto": "auto", "None": "None"}
default_vae_list = ["auto", "None"]
default_vae_values = [default_vae_dict[x] for x in default_vae_list]
vae_dict = dict(default_vae_dict)
vae_list = list(default_vae_list)
first_load = True
base_vae = None
loaded_vae_file = None
checkpoint_info = None
def get_base_vae(model):
if base_vae is not None and checkpoint_info == model.sd_checkpoint_info and model:
return base_vae
return None
def store_base_vae(model):
global base_vae, checkpoint_info
if checkpoint_info != model.sd_checkpoint_info:
base_vae = model.first_stage_model.state_dict().copy()
checkpoint_info = model.sd_checkpoint_info
def delete_base_vae():
global base_vae, checkpoint_info
base_vae = None
checkpoint_info = None
def restore_base_vae(model):
global base_vae, checkpoint_info
if base_vae is not None and checkpoint_info == model.sd_checkpoint_info:
load_vae_dict(model, base_vae)
delete_base_vae()
def get_filename(filepath):
return os.path.splitext(os.path.basename(filepath))[0]
def refresh_vae_list(vae_path=vae_path, model_path=model_path):
global vae_dict, vae_list
res = {}
candidates = [
*glob.iglob(os.path.join(model_path, '**/*.vae.ckpt'), recursive=True),
*glob.iglob(os.path.join(model_path, '**/*.vae.pt'), recursive=True),
*glob.iglob(os.path.join(vae_path, '**/*.ckpt'), recursive=True),
*glob.iglob(os.path.join(vae_path, '**/*.pt'), recursive=True)
]
if shared.cmd_opts.vae_path is not None and os.path.isfile(shared.cmd_opts.vae_path):
candidates.append(shared.cmd_opts.vae_path)
for filepath in candidates:
name = get_filename(filepath)
res[name] = filepath
vae_list.clear()
vae_list.extend(default_vae_list)
vae_list.extend(list(res.keys()))
vae_dict.clear()
vae_dict.update(res)
vae_dict.update(default_vae_dict)
return vae_list
def get_vae_from_settings(vae_file="auto"):
# else, we load from settings, if not set to be default
if vae_file == "auto" and shared.opts.sd_vae is not None:
# if saved VAE settings isn't recognized, fallback to auto
vae_file = vae_dict.get(shared.opts.sd_vae, "auto")
# if VAE selected but not found, fallback to auto
if vae_file not in default_vae_values and not os.path.isfile(vae_file):
vae_file = "auto"
print("Selected VAE doesn't exist")
return vae_file
def resolve_vae(checkpoint_file=None, vae_file="auto"):
global first_load, vae_dict, vae_list
# if vae_file argument is provided, it takes priority, but not saved
if vae_file and vae_file not in default_vae_list:
if not os.path.isfile(vae_file):
vae_file = "auto"
print("VAE provided as function argument doesn't exist")
# for the first load, if vae-path is provided, it takes priority, saved, and failure is reported
if first_load and shared.cmd_opts.vae_path is not None:
if os.path.isfile(shared.cmd_opts.vae_path):
vae_file = shared.cmd_opts.vae_path
shared.opts.data['sd_vae'] = get_filename(vae_file)
else:
print("VAE provided as command line argument doesn't exist")
# fallback to selector in settings, if vae selector not set to act as default fallback
if not shared.opts.sd_vae_as_default:
vae_file = get_vae_from_settings(vae_file)
# vae-path cmd arg takes priority for auto
if vae_file == "auto" and shared.cmd_opts.vae_path is not None:
if os.path.isfile(shared.cmd_opts.vae_path):
vae_file = shared.cmd_opts.vae_path
print("Using VAE provided as command line argument")
# if still not found, try look for ".vae.pt" beside model
model_path = os.path.splitext(checkpoint_file)[0]
if vae_file == "auto":
vae_file_try = model_path + ".vae.pt"
if os.path.isfile(vae_file_try):
vae_file = vae_file_try
print("Using VAE found beside selected model")
# if still not found, try look for ".vae.ckpt" beside model
if vae_file == "auto":
vae_file_try = model_path + ".vae.ckpt"
if os.path.isfile(vae_file_try):
vae_file = vae_file_try
print("Using VAE found beside selected model")
# No more fallbacks for auto
if vae_file == "auto":
vae_file = None
# Last check, just because
if vae_file and not os.path.exists(vae_file):
vae_file = None
return vae_file
def load_vae(model, vae_file=None):
global first_load, vae_dict, vae_list, loaded_vae_file
# save_settings = False
if vae_file:
print(f"Loading VAE weights from: {vae_file}")
vae_ckpt = torch.load(vae_file, map_location=shared.weight_load_location)
vae_dict_1 = {k: v for k, v in vae_ckpt["state_dict"].items() if k[0:4] != "loss" and k not in vae_ignore_keys}
load_vae_dict(model, vae_dict_1)
# If vae used is not in dict, update it
# It will be removed on refresh though
vae_opt = get_filename(vae_file)
if vae_opt not in vae_dict:
vae_dict[vae_opt] = vae_file
vae_list.append(vae_opt)
loaded_vae_file = vae_file
"""
# Save current VAE to VAE settings, maybe? will it work?
if save_settings:
if vae_file is None:
vae_opt = "None"
# shared.opts.sd_vae = vae_opt
"""
first_load = False
# don't call this from outside
def load_vae_dict(model, vae_dict_1=None):
if vae_dict_1:
store_base_vae(model)
model.first_stage_model.load_state_dict(vae_dict_1)
else:
restore_base_vae()
model.first_stage_model.to(devices.dtype_vae)
def reload_vae_weights(sd_model=None, vae_file="auto"):
from modules import lowvram, devices, sd_hijack
if not sd_model:
sd_model = shared.sd_model
checkpoint_info = sd_model.sd_checkpoint_info
checkpoint_file = checkpoint_info.filename
vae_file = resolve_vae(checkpoint_file, vae_file=vae_file)
if loaded_vae_file == vae_file:
return
if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
lowvram.send_everything_to_cpu()
else:
sd_model.to(devices.cpu)
sd_hijack.model_hijack.undo_hijack(sd_model)
load_vae(sd_model, vae_file)
sd_hijack.model_hijack.hijack(sd_model)
script_callbacks.model_loaded_callback(sd_model)
if not shared.cmd_opts.lowvram and not shared.cmd_opts.medvram:
sd_model.to(devices.device)
print(f"VAE Weights loaded.")
return sd_model
<|code_end|>
modules/shared.py
<|code_start|>import argparse
import datetime
import json
import os
import sys
import time
import gradio as gr
import tqdm
import modules.artists
import modules.interrogate
import modules.memmon
import modules.sd_models
import modules.styles
import modules.devices as devices
from modules import sd_samplers, sd_models, localization, sd_vae, extensions, script_loading
from modules.hypernetworks import hypernetwork
from modules.paths import models_path, script_path, sd_path
sd_model_file = os.path.join(script_path, 'model.ckpt')
default_sd_model_file = sd_model_file
parser = argparse.ArgumentParser()
parser.add_argument("--config", type=str, default=os.path.join(sd_path, "configs/stable-diffusion/v1-inference.yaml"), help="path to config which constructs model",)
parser.add_argument("--ckpt", type=str, default=sd_model_file, help="path to checkpoint of stable diffusion model; if specified, this checkpoint will be added to the list of checkpoints and loaded",)
parser.add_argument("--ckpt-dir", type=str, default=None, help="Path to directory with stable diffusion checkpoints")
parser.add_argument("--gfpgan-dir", type=str, help="GFPGAN directory", default=('./src/gfpgan' if os.path.exists('./src/gfpgan') else './GFPGAN'))
parser.add_argument("--gfpgan-model", type=str, help="GFPGAN model file name", default=None)
parser.add_argument("--no-half", action='store_true', help="do not switch the model to 16-bit floats")
parser.add_argument("--no-half-vae", action='store_true', help="do not switch the VAE model to 16-bit floats")
parser.add_argument("--no-progressbar-hiding", action='store_true', help="do not hide progressbar in gradio UI (we hide it because it slows down ML if you have hardware acceleration in browser)")
parser.add_argument("--max-batch-count", type=int, default=16, help="maximum batch count value for the UI")
parser.add_argument("--embeddings-dir", type=str, default=os.path.join(script_path, 'embeddings'), help="embeddings directory for textual inversion (default: embeddings)")
parser.add_argument("--hypernetwork-dir", type=str, default=os.path.join(models_path, 'hypernetworks'), help="hypernetwork directory")
parser.add_argument("--localizations-dir", type=str, default=os.path.join(script_path, 'localizations'), help="localizations directory")
parser.add_argument("--allow-code", action='store_true', help="allow custom script execution from webui")
parser.add_argument("--medvram", action='store_true', help="enable stable diffusion model optimizations for sacrificing a little speed for low VRM usage")
parser.add_argument("--lowvram", action='store_true', help="enable stable diffusion model optimizations for sacrificing a lot of speed for very low VRM usage")
parser.add_argument("--lowram", action='store_true', help="load stable diffusion checkpoint weights to VRAM instead of RAM")
parser.add_argument("--always-batch-cond-uncond", action='store_true', help="disables cond/uncond batching that is enabled to save memory with --medvram or --lowvram")
parser.add_argument("--unload-gfpgan", action='store_true', help="does not do anything.")
parser.add_argument("--precision", type=str, help="evaluate at this precision", choices=["full", "autocast"], default="autocast")
parser.add_argument("--share", action='store_true', help="use share=True for gradio and make the UI accessible through their site")
parser.add_argument("--ngrok", type=str, help="ngrok authtoken, alternative to gradio --share", default=None)
parser.add_argument("--ngrok-region", type=str, help="The region in which ngrok should start.", default="us")
parser.add_argument("--enable-insecure-extension-access", action='store_true', help="enable extensions tab regardless of other options")
parser.add_argument("--codeformer-models-path", type=str, help="Path to directory with codeformer model file(s).", default=os.path.join(models_path, 'Codeformer'))
parser.add_argument("--gfpgan-models-path", type=str, help="Path to directory with GFPGAN model file(s).", default=os.path.join(models_path, 'GFPGAN'))
parser.add_argument("--esrgan-models-path", type=str, help="Path to directory with ESRGAN model file(s).", default=os.path.join(models_path, 'ESRGAN'))
parser.add_argument("--bsrgan-models-path", type=str, help="Path to directory with BSRGAN model file(s).", default=os.path.join(models_path, 'BSRGAN'))
parser.add_argument("--realesrgan-models-path", type=str, help="Path to directory with RealESRGAN model file(s).", default=os.path.join(models_path, 'RealESRGAN'))
parser.add_argument("--scunet-models-path", type=str, help="Path to directory with ScuNET model file(s).", default=os.path.join(models_path, 'ScuNET'))
parser.add_argument("--swinir-models-path", type=str, help="Path to directory with SwinIR model file(s).", default=os.path.join(models_path, 'SwinIR'))
parser.add_argument("--ldsr-models-path", type=str, help="Path to directory with LDSR model file(s).", default=os.path.join(models_path, 'LDSR'))
parser.add_argument("--clip-models-path", type=str, help="Path to directory with CLIP model file(s).", default=None)
parser.add_argument("--xformers", action='store_true', help="enable xformers for cross attention layers")
parser.add_argument("--force-enable-xformers", action='store_true', help="enable xformers for cross attention layers regardless of whether the checking code thinks you can run it; do not make bug reports if this fails to work")
parser.add_argument("--deepdanbooru", action='store_true', help="enable deepdanbooru interrogator")
parser.add_argument("--opt-split-attention", action='store_true', help="force-enables Doggettx's cross-attention layer optimization. By default, it's on for torch cuda.")
parser.add_argument("--opt-split-attention-invokeai", action='store_true', help="force-enables InvokeAI's cross-attention layer optimization. By default, it's on when cuda is unavailable.")
parser.add_argument("--opt-split-attention-v1", action='store_true', help="enable older version of split attention optimization that does not consume all the VRAM it can find")
parser.add_argument("--disable-opt-split-attention", action='store_true', help="force-disables cross-attention layer optimization")
parser.add_argument("--use-cpu", nargs='+',choices=['all', 'sd', 'interrogate', 'gfpgan', 'swinir', 'esrgan', 'scunet', 'codeformer'], help="use CPU as torch device for specified modules", default=[], type=str.lower)
parser.add_argument("--listen", action='store_true', help="launch gradio with 0.0.0.0 as server name, allowing to respond to network requests")
parser.add_argument("--port", type=int, help="launch gradio with given server port, you need root/admin rights for ports < 1024, defaults to 7860 if available", default=None)
parser.add_argument("--show-negative-prompt", action='store_true', help="does not do anything", default=False)
parser.add_argument("--ui-config-file", type=str, help="filename to use for ui configuration", default=os.path.join(script_path, 'ui-config.json'))
parser.add_argument("--hide-ui-dir-config", action='store_true', help="hide directory configuration from webui", default=False)
parser.add_argument("--freeze-settings", action='store_true', help="disable editing settings", default=False)
parser.add_argument("--ui-settings-file", type=str, help="filename to use for ui settings", default=os.path.join(script_path, 'config.json'))
parser.add_argument("--gradio-debug", action='store_true', help="launch gradio with --debug option")
parser.add_argument("--gradio-auth", type=str, help='set gradio authentication like "username:password"; or comma-delimit multiple like "u1:p1,u2:p2,u3:p3"', default=None)
parser.add_argument("--gradio-img2img-tool", type=str, help='gradio image uploader tool: can be either editor for ctopping, or color-sketch for drawing', choices=["color-sketch", "editor"], default="editor")
parser.add_argument("--opt-channelslast", action='store_true', help="change memory type for stable diffusion to channels last")
parser.add_argument("--styles-file", type=str, help="filename to use for styles", default=os.path.join(script_path, 'styles.csv'))
parser.add_argument("--autolaunch", action='store_true', help="open the webui URL in the system's default browser upon launch", default=False)
parser.add_argument("--theme", type=str, help="launches the UI with light or dark theme", default=None)
parser.add_argument("--use-textbox-seed", action='store_true', help="use textbox for seeds in UI (no up/down, but possible to input long seeds)", default=False)
parser.add_argument("--disable-console-progressbars", action='store_true', help="do not output progressbars to console", default=False)
parser.add_argument("--enable-console-prompts", action='store_true', help="print prompts to console when generating with txt2img and img2img", default=False)
parser.add_argument('--vae-path', type=str, help='Path to Variational Autoencoders model', default=None)
parser.add_argument("--disable-safe-unpickle", action='store_true', help="disable checking pytorch models for malicious code", default=False)
parser.add_argument("--api", action='store_true', help="use api=True to launch the api with the webui")
parser.add_argument("--nowebui", action='store_true', help="use api=True to launch the api instead of the webui")
parser.add_argument("--ui-debug-mode", action='store_true', help="Don't load model to quickly launch UI")
parser.add_argument("--device-id", type=str, help="Select the default CUDA device to use (export CUDA_VISIBLE_DEVICES=0,1,etc might be needed before)", default=None)
parser.add_argument("--administrator", action='store_true', help="Administrator rights", default=False)
parser.add_argument("--cors-allow-origins", type=str, help="Allowed CORS origins", default=None)
parser.add_argument("--tls-keyfile", type=str, help="Partially enables TLS, requires --tls-certfile to fully function", default=None)
parser.add_argument("--tls-certfile", type=str, help="Partially enables TLS, requires --tls-keyfile to fully function", default=None)
parser.add_argument("--server-name", type=str, help="Sets hostname of server", default=None)
script_loading.preload_extensions(extensions.extensions_dir, parser)
cmd_opts = parser.parse_args()
restricted_opts = {
"samples_filename_pattern",
"directories_filename_pattern",
"outdir_samples",
"outdir_txt2img_samples",
"outdir_img2img_samples",
"outdir_extras_samples",
"outdir_grids",
"outdir_txt2img_grids",
"outdir_save",
}
cmd_opts.disable_extension_access = (cmd_opts.share or cmd_opts.listen) and not cmd_opts.enable_insecure_extension_access
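# one torch device per module: CPU when the module name (or 'all') is listed in --use-cpu, otherwise the best available device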
devices.device, devices.device_interrogate, devices.device_gfpgan, devices.device_swinir, devices.device_esrgan, devices.device_scunet, devices.device_codeformer = \
(devices.cpu if any(y in cmd_opts.use_cpu for y in [x, 'all']) else devices.get_optimal_device() for x in ['sd', 'interrogate', 'gfpgan', 'swinir', 'esrgan', 'scunet', 'codeformer'])
device = devices.device
weight_load_location = None if cmd_opts.lowram else "cpu"
batch_cond_uncond = cmd_opts.always_batch_cond_uncond or not (cmd_opts.lowvram or cmd_opts.medvram)
parallel_processing_allowed = not cmd_opts.lowvram and not cmd_opts.medvram
xformers_available = False
config_filename = cmd_opts.ui_settings_file
os.makedirs(cmd_opts.hypernetwork_dir, exist_ok=True)
hypernetworks = hypernetwork.list_hypernetworks(cmd_opts.hypernetwork_dir)
loaded_hypernetwork = None
def reload_hypernetworks():
global hypernetworks
hypernetworks = hypernetwork.list_hypernetworks(cmd_opts.hypernetwork_dir)
hypernetwork.load_hypernetwork(opts.sd_hypernetwork)
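# State tracks the currently running generation job for the UI/API: skip/interrupt flags, step counters and the live preview image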
class State:
skipped = False
interrupted = False
job = ""
job_no = 0
job_count = 0
job_timestamp = '0'
sampling_step = 0
sampling_steps = 0
current_latent = None
current_image = None
current_image_sampling_step = 0
textinfo = None
time_start = None
need_restart = False
def skip(self):
self.skipped = True
def interrupt(self):
self.interrupted = True
def nextjob(self):
if opts.show_progress_every_n_steps == -1:
self.do_set_current_image()
self.job_no += 1
self.sampling_step = 0
self.current_image_sampling_step = 0
def dict(self):
obj = {
"skipped": self.skipped,
"interrupted": self.skipped,
"job": self.job,
"job_count": self.job_count,
"job_no": self.job_no,
"sampling_step": self.sampling_step,
"sampling_steps": self.sampling_steps,
}
return obj
def begin(self):
self.sampling_step = 0
self.job_count = -1
self.job_no = 0
self.job_timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
self.current_latent = None
self.current_image = None
self.current_image_sampling_step = 0
self.skipped = False
self.interrupted = False
self.textinfo = None
self.time_start = time.time()
devices.torch_gc()
def end(self):
self.job = ""
self.job_count = 0
devices.torch_gc()
"""sets self.current_image from self.current_latent if enough sampling steps have been made after the last call to this"""
def set_current_image(self):
if self.sampling_step - self.current_image_sampling_step >= opts.show_progress_every_n_steps and opts.show_progress_every_n_steps > 0:
self.do_set_current_image()
def do_set_current_image(self):
if not parallel_processing_allowed:
return
if self.current_latent is None:
return
if opts.show_progress_grid:
self.current_image = sd_samplers.samples_to_image_grid(self.current_latent)
else:
self.current_image = sd_samplers.sample_to_image(self.current_latent)
self.current_image_sampling_step = self.sampling_step
state = State()
artist_db = modules.artists.ArtistsDatabase(os.path.join(script_path, 'artists.csv'))
styles_filename = cmd_opts.styles_file
prompt_styles = modules.styles.StyleDatabase(styles_filename)
interrogator = modules.interrogate.InterrogateModels("interrogate")
face_restorers = []
def realesrgan_models_names():
import modules.realesrgan_model
return [x.name for x in modules.realesrgan_model.get_realesrgan_models(None)]
class OptionInfo:
def __init__(self, default=None, label="", component=None, component_args=None, onchange=None, section=None, refresh=None):
self.default = default
self.label = label
self.component = component
self.component_args = component_args
self.onchange = onchange
self.section = section
self.refresh = refresh
def options_section(section_identifier, options_dict):
for k, v in options_dict.items():
v.section = section_identifier
return options_dict
hide_dirs = {"visible": not cmd_opts.hide_ui_dir_config}
options_templates = {}
options_templates.update(options_section(('saving-images', "Saving images/grids"), {
"samples_save": OptionInfo(True, "Always save all generated images"),
"samples_format": OptionInfo('png', 'File format for images'),
"samples_filename_pattern": OptionInfo("", "Images filename pattern", component_args=hide_dirs),
"save_images_add_number": OptionInfo(True, "Add number to filename when saving", component_args=hide_dirs),
"grid_save": OptionInfo(True, "Always save all generated image grids"),
"grid_format": OptionInfo('png', 'File format for grids'),
"grid_extended_filename": OptionInfo(False, "Add extended info (seed, prompt) to filename when saving grid"),
"grid_only_if_multiple": OptionInfo(True, "Do not save grids consisting of one picture"),
"grid_prevent_empty_spots": OptionInfo(False, "Prevent empty spots in grid (when set to autodetect)"),
"n_rows": OptionInfo(-1, "Grid row count; use -1 for autodetect and 0 for it to be same as batch size", gr.Slider, {"minimum": -1, "maximum": 16, "step": 1}),
"enable_pnginfo": OptionInfo(True, "Save text information about generation parameters as chunks to png files"),
"save_txt": OptionInfo(False, "Create a text file next to every image with generation parameters."),
"save_images_before_face_restoration": OptionInfo(False, "Save a copy of image before doing face restoration."),
"save_images_before_highres_fix": OptionInfo(False, "Save a copy of image before applying highres fix."),
"save_images_before_color_correction": OptionInfo(False, "Save a copy of image before applying color correction to img2img results"),
"jpeg_quality": OptionInfo(80, "Quality for saved jpeg images", gr.Slider, {"minimum": 1, "maximum": 100, "step": 1}),
"export_for_4chan": OptionInfo(True, "If PNG image is larger than 4MB or any dimension is larger than 4000, downscale and save copy as JPG"),
"use_original_name_batch": OptionInfo(False, "Use original name for output filename during batch process in extras tab"),
"save_selected_only": OptionInfo(True, "When using 'Save' button, only save a single selected image"),
"do_not_add_watermark": OptionInfo(False, "Do not add watermark to images"),
}))
options_templates.update(options_section(('saving-paths', "Paths for saving"), {
"outdir_samples": OptionInfo("", "Output directory for images; if empty, defaults to three directories below", component_args=hide_dirs),
"outdir_txt2img_samples": OptionInfo("outputs/txt2img-images", 'Output directory for txt2img images', component_args=hide_dirs),
"outdir_img2img_samples": OptionInfo("outputs/img2img-images", 'Output directory for img2img images', component_args=hide_dirs),
"outdir_extras_samples": OptionInfo("outputs/extras-images", 'Output directory for images from extras tab', component_args=hide_dirs),
"outdir_grids": OptionInfo("", "Output directory for grids; if empty, defaults to two directories below", component_args=hide_dirs),
"outdir_txt2img_grids": OptionInfo("outputs/txt2img-grids", 'Output directory for txt2img grids', component_args=hide_dirs),
"outdir_img2img_grids": OptionInfo("outputs/img2img-grids", 'Output directory for img2img grids', component_args=hide_dirs),
"outdir_save": OptionInfo("log/images", "Directory for saving images using the Save button", component_args=hide_dirs),
}))
options_templates.update(options_section(('saving-to-dirs', "Saving to a directory"), {
"save_to_dirs": OptionInfo(False, "Save images to a subdirectory"),
"grid_save_to_dirs": OptionInfo(False, "Save grids to a subdirectory"),
"use_save_to_dirs_for_ui": OptionInfo(False, "When using \"Save\" button, save images to a subdirectory"),
"directories_filename_pattern": OptionInfo("", "Directory name pattern", component_args=hide_dirs),
"directories_max_prompt_words": OptionInfo(8, "Max prompt words for [prompt_words] pattern", gr.Slider, {"minimum": 1, "maximum": 20, "step": 1, **hide_dirs}),
}))
options_templates.update(options_section(('upscaling', "Upscaling"), {
"ESRGAN_tile": OptionInfo(192, "Tile size for ESRGAN upscalers. 0 = no tiling.", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}),
"ESRGAN_tile_overlap": OptionInfo(8, "Tile overlap, in pixels for ESRGAN upscalers. Low values = visible seam.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}),
"realesrgan_enabled_models": OptionInfo(["R-ESRGAN x4+", "R-ESRGAN x4+ Anime6B"], "Select which Real-ESRGAN models to show in the web UI. (Requires restart)", gr.CheckboxGroup, lambda: {"choices": realesrgan_models_names()}),
"SWIN_tile": OptionInfo(192, "Tile size for all SwinIR.", gr.Slider, {"minimum": 16, "maximum": 512, "step": 16}),
"SWIN_tile_overlap": OptionInfo(8, "Tile overlap, in pixels for SwinIR. Low values = visible seam.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}),
"ldsr_steps": OptionInfo(100, "LDSR processing steps. Lower = faster", gr.Slider, {"minimum": 1, "maximum": 200, "step": 1}),
"upscaler_for_img2img": OptionInfo(None, "Upscaler for img2img", gr.Dropdown, lambda: {"choices": [x.name for x in sd_upscalers]}),
"use_scale_latent_for_hires_fix": OptionInfo(False, "Upscale latent space image when doing hires. fix"),
}))
options_templates.update(options_section(('face-restoration', "Face restoration"), {
"face_restoration_model": OptionInfo(None, "Face restoration model", gr.Radio, lambda: {"choices": [x.name() for x in face_restorers]}),
"code_former_weight": OptionInfo(0.5, "CodeFormer weight parameter; 0 = maximum effect; 1 = minimum effect", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}),
"face_restoration_unload": OptionInfo(False, "Move face restoration model from VRAM into RAM after processing"),
}))
options_templates.update(options_section(('system', "System"), {
"memmon_poll_rate": OptionInfo(8, "VRAM usage polls per second during generation. Set to 0 to disable.", gr.Slider, {"minimum": 0, "maximum": 40, "step": 1}),
"samples_log_stdout": OptionInfo(False, "Always print all generation info to standard output"),
"multiple_tqdm": OptionInfo(True, "Add a second progress bar to the console that shows progress for an entire job."),
}))
options_templates.update(options_section(('training', "Training"), {
"unload_models_when_training": OptionInfo(False, "Move VAE and CLIP to RAM when training if possible. Saves VRAM."),
"shuffle_tags": OptionInfo(False, "Shuffleing tags by ',' when create texts."),
"tag_drop_out": OptionInfo(0, "Dropout tags when create texts", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.1}),
"save_optimizer_state": OptionInfo(False, "Saves Optimizer state as separate *.optim file. Training can be resumed with HN itself and matching optim file."),
"dataset_filename_word_regex": OptionInfo("", "Filename word regex"),
"dataset_filename_join_string": OptionInfo(" ", "Filename join string"),
"training_image_repeats_per_epoch": OptionInfo(1, "Number of repeats for a single input image per epoch; used only for displaying epoch number", gr.Number, {"precision": 0}),
"training_write_csv_every": OptionInfo(500, "Save an csv containing the loss to log directory every N steps, 0 to disable"),
"training_xattention_optimizations": OptionInfo(False, "Use cross attention optimizations while training"),
}))
options_templates.update(options_section(('sd', "Stable Diffusion"), {
"sd_model_checkpoint": OptionInfo(None, "Stable Diffusion checkpoint", gr.Dropdown, lambda: {"choices": modules.sd_models.checkpoint_tiles()}, refresh=sd_models.list_models),
"sd_checkpoint_cache": OptionInfo(0, "Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}),
"sd_vae": OptionInfo("auto", "SD VAE", gr.Dropdown, lambda: {"choices": list(sd_vae.vae_list)}, refresh=sd_vae.refresh_vae_list),
"sd_vae_as_default": OptionInfo(False, "Ignore selected VAE for stable diffusion checkpoints that have their own .vae.pt next to them"),
"sd_hypernetwork": OptionInfo("None", "Hypernetwork", gr.Dropdown, lambda: {"choices": ["None"] + [x for x in hypernetworks.keys()]}, refresh=reload_hypernetworks),
"sd_hypernetwork_strength": OptionInfo(1.0, "Hypernetwork strength", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.001}),
"inpainting_mask_weight": OptionInfo(1.0, "Inpainting conditioning mask strength", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
"img2img_color_correction": OptionInfo(False, "Apply color correction to img2img results to match original colors."),
"img2img_fix_steps": OptionInfo(False, "With img2img, do exactly the amount of steps the slider specifies (normally you'd do less with less denoising)."),
"enable_quantization": OptionInfo(False, "Enable quantization in K samplers for sharper and cleaner results. This may change existing seeds. Requires restart to apply."),
"enable_emphasis": OptionInfo(True, "Emphasis: use (text) to make model pay more attention to text and [text] to make it pay less attention"),
"use_old_emphasis_implementation": OptionInfo(False, "Use old emphasis implementation. Can be useful to reproduce old seeds."),
"enable_batch_seeds": OptionInfo(True, "Make K-diffusion samplers produce same images in a batch as when making a single image"),
"comma_padding_backtrack": OptionInfo(20, "Increase coherency by padding from the last comma within n tokens when using more than 75 tokens", gr.Slider, {"minimum": 0, "maximum": 74, "step": 1 }),
"filter_nsfw": OptionInfo(False, "Filter NSFW content"),
'CLIP_stop_at_last_layers': OptionInfo(1, "Stop At last layers of CLIP model", gr.Slider, {"minimum": 1, "maximum": 12, "step": 1}),
"random_artist_categories": OptionInfo([], "Allowed categories for random artists selection when using the Roll button", gr.CheckboxGroup, {"choices": artist_db.categories()}),
}))
options_templates.update(options_section(('interrogate', "Interrogate Options"), {
"interrogate_keep_models_in_memory": OptionInfo(False, "Interrogate: keep models in VRAM"),
"interrogate_use_builtin_artists": OptionInfo(True, "Interrogate: use artists from artists.csv"),
"interrogate_return_ranks": OptionInfo(False, "Interrogate: include ranks of model tags matches in results (Has no effect on caption-based interrogators)."),
"interrogate_clip_num_beams": OptionInfo(1, "Interrogate: num_beams for BLIP", gr.Slider, {"minimum": 1, "maximum": 16, "step": 1}),
"interrogate_clip_min_length": OptionInfo(24, "Interrogate: minimum description length (excluding artists, etc..)", gr.Slider, {"minimum": 1, "maximum": 128, "step": 1}),
"interrogate_clip_max_length": OptionInfo(48, "Interrogate: maximum description length", gr.Slider, {"minimum": 1, "maximum": 256, "step": 1}),
"interrogate_clip_dict_limit": OptionInfo(1500, "CLIP: maximum number of lines in text file (0 = No limit)"),
"interrogate_deepbooru_score_threshold": OptionInfo(0.5, "Interrogate: deepbooru score threshold", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}),
"deepbooru_sort_alpha": OptionInfo(True, "Interrogate: deepbooru sort alphabetically"),
"deepbooru_use_spaces": OptionInfo(False, "use spaces for tags in deepbooru"),
"deepbooru_escape": OptionInfo(True, "escape (\\) brackets in deepbooru (so they are used as literal brackets and not for emphasis)"),
}))
options_templates.update(options_section(('ui', "User interface"), {
"show_progressbar": OptionInfo(True, "Show progressbar"),
"show_progress_every_n_steps": OptionInfo(0, "Show image creation progress every N sampling steps. Set to 0 to disable. Set to -1 to show after completion of batch.", gr.Slider, {"minimum": -1, "maximum": 32, "step": 1}),
"show_progress_grid": OptionInfo(True, "Show previews of all images generated in a batch as a grid"),
"return_grid": OptionInfo(True, "Show grid in results for web"),
"do_not_show_images": OptionInfo(False, "Do not show any images in results for web"),
"add_model_hash_to_info": OptionInfo(True, "Add model hash to generation information"),
"add_model_name_to_info": OptionInfo(False, "Add model name to generation information"),
"disable_weights_auto_swap": OptionInfo(False, "When reading generation parameters from text into UI (from PNG info or pasted text), do not change the selected model/checkpoint."),
"send_seed": OptionInfo(True, "Send seed when sending prompt or image to other interface"),
"font": OptionInfo("", "Font for image grids that have text"),
"js_modal_lightbox": OptionInfo(True, "Enable full page image viewer"),
"js_modal_lightbox_initially_zoomed": OptionInfo(True, "Show images zoomed in by default in full page image viewer"),
"show_progress_in_title": OptionInfo(True, "Show generation progress in window title."),
'quicksettings': OptionInfo("sd_model_checkpoint", "Quicksettings list"),
'localization': OptionInfo("None", "Localization (requires restart)", gr.Dropdown, lambda: {"choices": ["None"] + list(localization.localizations.keys())}, refresh=lambda: localization.list_localizations(cmd_opts.localizations_dir)),
}))
options_templates.update(options_section(('sampler-params', "Sampler parameters"), {
"hide_samplers": OptionInfo([], "Hide samplers in user interface (requires restart)", gr.CheckboxGroup, lambda: {"choices": [x.name for x in sd_samplers.all_samplers]}),
"eta_ddim": OptionInfo(0.0, "eta (noise multiplier) for DDIM", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
"eta_ancestral": OptionInfo(1.0, "eta (noise multiplier) for ancestral samplers", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
"ddim_discretize": OptionInfo('uniform', "img2img DDIM discretize", gr.Radio, {"choices": ['uniform', 'quad']}),
's_churn': OptionInfo(0.0, "sigma churn", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
's_tmin': OptionInfo(0.0, "sigma tmin", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
's_noise': OptionInfo(1.0, "sigma noise", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
'eta_noise_seed_delta': OptionInfo(0, "Eta noise seed delta", gr.Number, {"precision": 0}),
}))
options_templates.update(options_section((None, "Hidden options"), {
"disabled_extensions": OptionInfo([], "Disable those extensions"),
}))
options_templates.update()
class Options:
data = None
data_labels = options_templates
typemap = {int: float}
def __init__(self):
self.data = {k: v.default for k, v in self.data_labels.items()}
def __setattr__(self, key, value):
if self.data is not None:
if key in self.data or key in self.data_labels:
assert not cmd_opts.freeze_settings, "changing settings is disabled"
info = opts.data_labels.get(key, None)
comp_args = info.component_args if info else None
if isinstance(comp_args, dict) and comp_args.get('visible', True) is False:
raise RuntimeError(f"not possible to set {key} because it is restricted")
if cmd_opts.hide_ui_dir_config and key in restricted_opts:
raise RuntimeError(f"not possible to set {key} because it is restricted")
self.data[key] = value
return
return super(Options, self).__setattr__(key, value)
def __getattr__(self, item):
if self.data is not None:
if item in self.data:
return self.data[item]
if item in self.data_labels:
return self.data_labels[item].default
return super(Options, self).__getattribute__(item)
def save(self, filename):
assert not cmd_opts.freeze_settings, "saving settings is disabled"
with open(filename, "w", encoding="utf8") as file:
json.dump(self.data, file, indent=4)
def same_type(self, x, y):
if x is None or y is None:
return True
type_x = self.typemap.get(type(x), type(x))
type_y = self.typemap.get(type(y), type(y))
return type_x == type_y
def load(self, filename):
with open(filename, "r", encoding="utf8") as file:
self.data = json.load(file)
bad_settings = 0
for k, v in self.data.items():
info = self.data_labels.get(k, None)
if info is not None and not self.same_type(info.default, v):
print(f"Warning: bad setting value: {k}: {v} ({type(v).__name__}; expected {type(info.default).__name__})", file=sys.stderr)
bad_settings += 1
if bad_settings > 0:
print(f"The program is likely to not work with bad settings.\nSettings file: {filename}\nEither fix the file, or delete it and restart.", file=sys.stderr)
def onchange(self, key, func, call=True):
item = self.data_labels.get(key)
item.onchange = func
if call:
func()
def dumpjson(self):
d = {k: self.data.get(k, self.data_labels.get(k).default) for k in self.data_labels.keys()}
return json.dumps(d)
def add_option(self, key, info):
self.data_labels[key] = info
def reorder(self):
"""reorder settings so that all items related to section always go together"""
section_ids = {}
settings_items = self.data_labels.items()
for k, item in settings_items:
if item.section not in section_ids:
section_ids[item.section] = len(section_ids)
self.data_labels = {k: v for k, v in sorted(settings_items, key=lambda x: section_ids[x[1].section])}
opts = Options()
if os.path.exists(config_filename):
opts.load(config_filename)
sd_upscalers = []
sd_model = None
clip_model = None
progress_print_out = sys.stdout
class TotalTQDM:
def __init__(self):
self._tqdm = None
def reset(self):
self._tqdm = tqdm.tqdm(
desc="Total progress",
total=state.job_count * state.sampling_steps,
position=1,
file=progress_print_out
)
def update(self):
if not opts.multiple_tqdm or cmd_opts.disable_console_progressbars:
return
if self._tqdm is None:
self.reset()
self._tqdm.update()
def updateTotal(self, new_total):
if not opts.multiple_tqdm or cmd_opts.disable_console_progressbars:
return
if self._tqdm is None:
self.reset()
self._tqdm.total=new_total
def clear(self):
if self._tqdm is not None:
self._tqdm.close()
self._tqdm = None
total_tqdm = TotalTQDM()
mem_mon = modules.memmon.MemUsageMonitor("MemMon", device, opts)
mem_mon.start()
def listfiles(dirname):
filenames = [os.path.join(dirname, x) for x in sorted(os.listdir(dirname)) if not x.startswith(".")]
return [file for file in filenames if os.path.isfile(file)]
<|code_end|>
|
modules/sd_models.py
<|code_start|>import collections
import os.path
import sys
import gc
from collections import namedtuple
import torch
import re
from omegaconf import OmegaConf
from ldm.util import instantiate_from_config
from modules import shared, modelloader, devices, script_callbacks, sd_vae
from modules.paths import models_path
from modules.sd_hijack_inpainting import do_inpainting_hijack, should_hijack_inpainting
model_dir = "Stable-diffusion"
model_path = os.path.abspath(os.path.join(models_path, model_dir))
CheckpointInfo = namedtuple("CheckpointInfo", ['filename', 'title', 'hash', 'model_name', 'config'])
checkpoints_list = {}
checkpoints_loaded = collections.OrderedDict()
try:
# this silences the annoying "Some weights of the model checkpoint were not used when initializing..." message at start.
from transformers import logging, CLIPModel
logging.set_verbosity_error()
except Exception:
pass
def setup_model():
if not os.path.exists(model_path):
os.makedirs(model_path)
list_models()
def checkpoint_tiles():
convert = lambda name: int(name) if name.isdigit() else name.lower()
alphanumeric_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]
return sorted([x.title for x in checkpoints_list.values()], key = alphanumeric_key)
def list_models():
checkpoints_list.clear()
model_list = modelloader.load_models(model_path=model_path, command_path=shared.cmd_opts.ckpt_dir, ext_filter=[".ckpt"])
def modeltitle(path, shorthash):
abspath = os.path.abspath(path)
if shared.cmd_opts.ckpt_dir is not None and abspath.startswith(shared.cmd_opts.ckpt_dir):
name = abspath.replace(shared.cmd_opts.ckpt_dir, '')
elif abspath.startswith(model_path):
name = abspath.replace(model_path, '')
else:
name = os.path.basename(path)
if name.startswith("\\") or name.startswith("/"):
name = name[1:]
shortname = os.path.splitext(name.replace("/", "_").replace("\\", "_"))[0]
return f'{name} [{shorthash}]', shortname
cmd_ckpt = shared.cmd_opts.ckpt
if os.path.exists(cmd_ckpt):
h = model_hash(cmd_ckpt)
title, short_model_name = modeltitle(cmd_ckpt, h)
checkpoints_list[title] = CheckpointInfo(cmd_ckpt, title, h, short_model_name, shared.cmd_opts.config)
shared.opts.data['sd_model_checkpoint'] = title
elif cmd_ckpt is not None and cmd_ckpt != shared.default_sd_model_file:
print(f"Checkpoint in --ckpt argument not found (Possible it was moved to {model_path}: {cmd_ckpt}", file=sys.stderr)
for filename in model_list:
h = model_hash(filename)
title, short_model_name = modeltitle(filename, h)
basename, _ = os.path.splitext(filename)
config = basename + ".yaml"
if not os.path.exists(config):
config = shared.cmd_opts.config
checkpoints_list[title] = CheckpointInfo(filename, title, h, short_model_name, config)
def get_closet_checkpoint_match(searchString):
applicable = sorted([info for info in checkpoints_list.values() if searchString in info.title], key = lambda x:len(x.title))
if len(applicable) > 0:
return applicable[0]
return None
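# short "model hash": first 8 hex chars of a sha256 over 64KiB read at offset 0x100000 of the checkpoint; a cheap fingerprint, not a full-file hash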
def model_hash(filename):
try:
with open(filename, "rb") as file:
import hashlib
m = hashlib.sha256()
file.seek(0x100000)
m.update(file.read(0x10000))
return m.hexdigest()[0:8]
except FileNotFoundError:
return 'NOFILE'
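# return the checkpoint selected in settings; fall back to the first known checkpoint if the selection is missing, or exit if none were found at all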
def select_checkpoint():
model_checkpoint = shared.opts.sd_model_checkpoint
checkpoint_info = checkpoints_list.get(model_checkpoint, None)
if checkpoint_info is not None:
return checkpoint_info
if len(checkpoints_list) == 0:
print(f"No checkpoints found. When searching for checkpoints, looked at:", file=sys.stderr)
if shared.cmd_opts.ckpt is not None:
print(f" - file {os.path.abspath(shared.cmd_opts.ckpt)}", file=sys.stderr)
print(f" - directory {model_path}", file=sys.stderr)
if shared.cmd_opts.ckpt_dir is not None:
print(f" - directory {os.path.abspath(shared.cmd_opts.ckpt_dir)}", file=sys.stderr)
print(f"Can't run without a checkpoint. Find and place a .ckpt file into any of those locations. The program will exit.", file=sys.stderr)
exit(1)
checkpoint_info = next(iter(checkpoints_list.values()))
if model_checkpoint is not None:
print(f"Checkpoint {model_checkpoint} not found; loading fallback {checkpoint_info.title}", file=sys.stderr)
return checkpoint_info
chckpoint_dict_replacements = {
'cond_stage_model.transformer.embeddings.': 'cond_stage_model.transformer.text_model.embeddings.',
'cond_stage_model.transformer.encoder.': 'cond_stage_model.transformer.text_model.encoder.',
'cond_stage_model.transformer.final_layer_norm.': 'cond_stage_model.transformer.text_model.final_layer_norm.',
}
def transform_checkpoint_dict_key(k):
for text, replacement in chckpoint_dict_replacements.items():
if k.startswith(text):
k = replacement + k[len(text):]
return k
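# unwrap a Lightning-style checkpoint ("state_dict" key) and remap legacy CLIP text-encoder key names to the current text_model layout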
def get_state_dict_from_checkpoint(pl_sd):
if "state_dict" in pl_sd:
pl_sd = pl_sd["state_dict"]
sd = {}
for k, v in pl_sd.items():
new_key = transform_checkpoint_dict_key(k)
if new_key is not None:
sd[new_key] = v
pl_sd.clear()
pl_sd.update(sd)
return pl_sd
def load_model_weights(model, checkpoint_info, vae_file="auto"):
checkpoint_file = checkpoint_info.filename
sd_model_hash = checkpoint_info.hash
cache_enabled = shared.opts.sd_checkpoint_cache > 0
if cache_enabled and checkpoint_info in checkpoints_loaded:
# use checkpoint cache
print(f"Loading weights [{sd_model_hash}] from cache")
model.load_state_dict(checkpoints_loaded[checkpoint_info])
else:
# load from file
print(f"Loading weights [{sd_model_hash}] from {checkpoint_file}")
pl_sd = torch.load(checkpoint_file, map_location=shared.weight_load_location)
if "global_step" in pl_sd:
print(f"Global Step: {pl_sd['global_step']}")
sd = get_state_dict_from_checkpoint(pl_sd)
del pl_sd
model.load_state_dict(sd, strict=False)
del sd
if cache_enabled:
# cache newly loaded model
checkpoints_loaded[checkpoint_info] = model.state_dict().copy()
if shared.cmd_opts.opt_channelslast:
model.to(memory_format=torch.channels_last)
if not shared.cmd_opts.no_half:
vae = model.first_stage_model
# with --no-half-vae, remove VAE from model when doing half() to prevent its weights from being converted to float16
if shared.cmd_opts.no_half_vae:
model.first_stage_model = None
model.half()
model.first_stage_model = vae
devices.dtype = torch.float32 if shared.cmd_opts.no_half else torch.float16
devices.dtype_vae = torch.float32 if shared.cmd_opts.no_half or shared.cmd_opts.no_half_vae else torch.float16
model.first_stage_model.to(devices.dtype_vae)
# clean up cache if limit is reached
if cache_enabled:
while len(checkpoints_loaded) > shared.opts.sd_checkpoint_cache + 1: # we need to count the current model
checkpoints_loaded.popitem(last=False) # LRU
model.sd_model_hash = sd_model_hash
model.sd_model_checkpoint = checkpoint_file
model.sd_checkpoint_info = checkpoint_info
vae_file = sd_vae.resolve_vae(checkpoint_file, vae_file=vae_file)
sd_vae.load_vae(model, vae_file)
def load_model(checkpoint_info=None):
from modules import lowvram, sd_hijack
checkpoint_info = checkpoint_info or select_checkpoint()
if checkpoint_info.config != shared.cmd_opts.config:
print(f"Loading config from: {checkpoint_info.config}")
if shared.sd_model:
sd_hijack.model_hijack.undo_hijack(shared.sd_model)
shared.sd_model = None
gc.collect()
devices.torch_gc()
sd_config = OmegaConf.load(checkpoint_info.config)
if should_hijack_inpainting(checkpoint_info):
# Hardcoded config for now...
sd_config.model.target = "ldm.models.diffusion.ddpm.LatentInpaintDiffusion"
sd_config.model.params.use_ema = False
sd_config.model.params.conditioning_key = "hybrid"
sd_config.model.params.unet_config.params.in_channels = 9
# Create a "fake" config with a different name so that we know to unload it when switching models.
checkpoint_info = checkpoint_info._replace(config=checkpoint_info.config.replace(".yaml", "-inpainting.yaml"))
do_inpainting_hijack()
sd_model = instantiate_from_config(sd_config.model)
load_model_weights(sd_model, checkpoint_info)
if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
lowvram.setup_for_low_vram(sd_model, shared.cmd_opts.medvram)
else:
sd_model.to(shared.device)
sd_hijack.model_hijack.hijack(sd_model)
sd_model.eval()
shared.sd_model = sd_model
script_callbacks.model_loaded_callback(sd_model)
print(f"Model loaded.")
return sd_model
def reload_model_weights(sd_model=None, info=None):
from modules import lowvram, devices, sd_hijack
checkpoint_info = info or select_checkpoint()
if not sd_model:
sd_model = shared.sd_model
if sd_model.sd_model_checkpoint == checkpoint_info.filename:
return
if sd_model.sd_checkpoint_info.config != checkpoint_info.config or should_hijack_inpainting(checkpoint_info) != should_hijack_inpainting(sd_model.sd_checkpoint_info):
del sd_model
checkpoints_loaded.clear()
load_model(checkpoint_info)
return shared.sd_model
if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
lowvram.send_everything_to_cpu()
else:
sd_model.to(devices.cpu)
sd_hijack.model_hijack.undo_hijack(sd_model)
load_model_weights(sd_model, checkpoint_info)
sd_hijack.model_hijack.hijack(sd_model)
script_callbacks.model_loaded_callback(sd_model)
if not shared.cmd_opts.lowvram and not shared.cmd_opts.medvram:
sd_model.to(devices.device)
print(f"Weights loaded.")
return sd_model
<|code_end|>
modules/sd_vae.py
<|code_start|>import torch
import os
from collections import namedtuple
from modules import shared, devices, script_callbacks
from modules.paths import models_path
import glob
model_dir = "Stable-diffusion"
model_path = os.path.abspath(os.path.join(models_path, model_dir))
vae_dir = "VAE"
vae_path = os.path.abspath(os.path.join(models_path, vae_dir))
vae_ignore_keys = {"model_ema.decay", "model_ema.num_updates"}
default_vae_dict = {"auto": "auto", "None": "None"}
default_vae_list = ["auto", "None"]
default_vae_values = [default_vae_dict[x] for x in default_vae_list]
vae_dict = dict(default_vae_dict)
vae_list = list(default_vae_list)
first_load = True
base_vae = None
loaded_vae_file = None
checkpoint_info = None
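# base_vae caches the checkpoint's own first_stage_model weights so they can be restored after an external VAE has been swapped in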
def get_base_vae(model):
if base_vae is not None and checkpoint_info == model.sd_checkpoint_info and model:
return base_vae
return None
def store_base_vae(model):
global base_vae, checkpoint_info
if checkpoint_info != model.sd_checkpoint_info:
base_vae = model.first_stage_model.state_dict().copy()
checkpoint_info = model.sd_checkpoint_info
def delete_base_vae():
global base_vae, checkpoint_info
base_vae = None
checkpoint_info = None
def restore_base_vae(model):
global base_vae, checkpoint_info
if base_vae is not None and checkpoint_info == model.sd_checkpoint_info:
load_vae_dict(model, base_vae)
delete_base_vae()
def get_filename(filepath):
return os.path.splitext(os.path.basename(filepath))[0]
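# rebuild vae_list/vae_dict in place: *.vae.pt / *.vae.ckpt next to checkpoints, *.pt / *.ckpt in the VAE folder, plus an optional --vae-path file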
def refresh_vae_list(vae_path=vae_path, model_path=model_path):
global vae_dict, vae_list
res = {}
candidates = [
*glob.iglob(os.path.join(model_path, '**/*.vae.ckpt'), recursive=True),
*glob.iglob(os.path.join(model_path, '**/*.vae.pt'), recursive=True),
*glob.iglob(os.path.join(vae_path, '**/*.ckpt'), recursive=True),
*glob.iglob(os.path.join(vae_path, '**/*.pt'), recursive=True)
]
if shared.cmd_opts.vae_path is not None and os.path.isfile(shared.cmd_opts.vae_path):
candidates.append(shared.cmd_opts.vae_path)
for filepath in candidates:
name = get_filename(filepath)
res[name] = filepath
vae_list.clear()
vae_list.extend(default_vae_list)
vae_list.extend(list(res.keys()))
vae_dict.clear()
vae_dict.update(res)
vae_dict.update(default_vae_dict)
return vae_list
def get_vae_from_settings(vae_file="auto"):
# else, we load from settings, if not set to be default
if vae_file == "auto" and shared.opts.sd_vae is not None:
# if saved VAE settings isn't recognized, fallback to auto
vae_file = vae_dict.get(shared.opts.sd_vae, "auto")
# if VAE selected but not found, fallback to auto
if vae_file not in default_vae_values and not os.path.isfile(vae_file):
vae_file = "auto"
print(f"Selected VAE doesn't exist: {vae_file}")
return vae_file
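# resolution order: explicit vae_file argument, --vae-path on first load, the settings selector (skipped when sd_vae_as_default is set), then a .vae.pt/.vae.ckpt next to the checkpoint; an unresolved "auto" becomes None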
def resolve_vae(checkpoint_file=None, vae_file="auto"):
global first_load, vae_dict, vae_list
# if vae_file argument is provided, it takes priority, but not saved
if vae_file and vae_file not in default_vae_list:
if not os.path.isfile(vae_file):
print(f"VAE provided as function argument doesn't exist: {vae_file}")
vae_file = "auto"
# for the first load, if vae-path is provided, it takes priority, saved, and failure is reported
if first_load and shared.cmd_opts.vae_path is not None:
if os.path.isfile(shared.cmd_opts.vae_path):
vae_file = shared.cmd_opts.vae_path
shared.opts.data['sd_vae'] = get_filename(vae_file)
else:
print(f"VAE provided as command line argument doesn't exist: {vae_file}")
# fallback to selector in settings, if vae selector not set to act as default fallback
if not shared.opts.sd_vae_as_default:
vae_file = get_vae_from_settings(vae_file)
# vae-path cmd arg takes priority for auto
if vae_file == "auto" and shared.cmd_opts.vae_path is not None:
if os.path.isfile(shared.cmd_opts.vae_path):
vae_file = shared.cmd_opts.vae_path
print(f"Using VAE provided as command line argument: {vae_file}")
# if still not found, try look for ".vae.pt" beside model
model_path = os.path.splitext(checkpoint_file)[0]
if vae_file == "auto":
vae_file_try = model_path + ".vae.pt"
if os.path.isfile(vae_file_try):
vae_file = vae_file_try
print(f"Using VAE found similar to selected model: {vae_file}")
# if still not found, try look for ".vae.ckpt" beside model
if vae_file == "auto":
vae_file_try = model_path + ".vae.ckpt"
if os.path.isfile(vae_file_try):
vae_file = vae_file_try
print(f"Using VAE found similar to selected model: {vae_file}")
# No more fallbacks for auto
if vae_file == "auto":
vae_file = None
# Last check, just because
if vae_file and not os.path.exists(vae_file):
vae_file = None
return vae_file
def load_vae(model, vae_file=None):
global first_load, vae_dict, vae_list, loaded_vae_file
# save_settings = False
if vae_file:
assert os.path.isfile(vae_file), f"VAE file doesn't exist: {vae_file}"
print(f"Loading VAE weights from: {vae_file}")
vae_ckpt = torch.load(vae_file, map_location=shared.weight_load_location)
vae_dict_1 = {k: v for k, v in vae_ckpt["state_dict"].items() if k[0:4] != "loss" and k not in vae_ignore_keys}
load_vae_dict(model, vae_dict_1)
# If vae used is not in dict, update it
# It will be removed on refresh though
vae_opt = get_filename(vae_file)
if vae_opt not in vae_dict:
vae_dict[vae_opt] = vae_file
vae_list.append(vae_opt)
loaded_vae_file = vae_file
"""
# Save current VAE to VAE settings, maybe? will it work?
if save_settings:
if vae_file is None:
vae_opt = "None"
# shared.opts.sd_vae = vae_opt
"""
first_load = False
# don't call this from outside
def load_vae_dict(model, vae_dict_1=None):
if vae_dict_1:
store_base_vae(model)
model.first_stage_model.load_state_dict(vae_dict_1)
else:
restore_base_vae(model)
model.first_stage_model.to(devices.dtype_vae)
def reload_vae_weights(sd_model=None, vae_file="auto"):
from modules import lowvram, devices, sd_hijack
if not sd_model:
sd_model = shared.sd_model
checkpoint_info = sd_model.sd_checkpoint_info
checkpoint_file = checkpoint_info.filename
vae_file = resolve_vae(checkpoint_file, vae_file=vae_file)
if loaded_vae_file == vae_file:
return
if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
lowvram.send_everything_to_cpu()
else:
sd_model.to(devices.cpu)
sd_hijack.model_hijack.undo_hijack(sd_model)
load_vae(sd_model, vae_file)
sd_hijack.model_hijack.hijack(sd_model)
script_callbacks.model_loaded_callback(sd_model)
if not shared.cmd_opts.lowvram and not shared.cmd_opts.medvram:
sd_model.to(devices.device)
print(f"VAE Weights loaded.")
return sd_model
<|code_end|>
modules/shared.py
<|code_start|>import argparse
import datetime
import json
import os
import sys
import time
import gradio as gr
import tqdm
import modules.artists
import modules.interrogate
import modules.memmon
import modules.sd_models
import modules.styles
import modules.devices as devices
from modules import sd_samplers, sd_models, localization, sd_vae, extensions, script_loading
from modules.hypernetworks import hypernetwork
from modules.paths import models_path, script_path, sd_path
sd_model_file = os.path.join(script_path, 'model.ckpt')
default_sd_model_file = sd_model_file
parser = argparse.ArgumentParser()
parser.add_argument("--config", type=str, default=os.path.join(sd_path, "configs/stable-diffusion/v1-inference.yaml"), help="path to config which constructs model",)
parser.add_argument("--ckpt", type=str, default=sd_model_file, help="path to checkpoint of stable diffusion model; if specified, this checkpoint will be added to the list of checkpoints and loaded",)
parser.add_argument("--ckpt-dir", type=str, default=None, help="Path to directory with stable diffusion checkpoints")
parser.add_argument("--gfpgan-dir", type=str, help="GFPGAN directory", default=('./src/gfpgan' if os.path.exists('./src/gfpgan') else './GFPGAN'))
parser.add_argument("--gfpgan-model", type=str, help="GFPGAN model file name", default=None)
parser.add_argument("--no-half", action='store_true', help="do not switch the model to 16-bit floats")
parser.add_argument("--no-half-vae", action='store_true', help="do not switch the VAE model to 16-bit floats")
parser.add_argument("--no-progressbar-hiding", action='store_true', help="do not hide progressbar in gradio UI (we hide it because it slows down ML if you have hardware acceleration in browser)")
parser.add_argument("--max-batch-count", type=int, default=16, help="maximum batch count value for the UI")
parser.add_argument("--embeddings-dir", type=str, default=os.path.join(script_path, 'embeddings'), help="embeddings directory for textual inversion (default: embeddings)")
parser.add_argument("--hypernetwork-dir", type=str, default=os.path.join(models_path, 'hypernetworks'), help="hypernetwork directory")
parser.add_argument("--localizations-dir", type=str, default=os.path.join(script_path, 'localizations'), help="localizations directory")
parser.add_argument("--allow-code", action='store_true', help="allow custom script execution from webui")
parser.add_argument("--medvram", action='store_true', help="enable stable diffusion model optimizations for sacrificing a little speed for low VRM usage")
parser.add_argument("--lowvram", action='store_true', help="enable stable diffusion model optimizations for sacrificing a lot of speed for very low VRM usage")
parser.add_argument("--lowram", action='store_true', help="load stable diffusion checkpoint weights to VRAM instead of RAM")
parser.add_argument("--always-batch-cond-uncond", action='store_true', help="disables cond/uncond batching that is enabled to save memory with --medvram or --lowvram")
parser.add_argument("--unload-gfpgan", action='store_true', help="does not do anything.")
parser.add_argument("--precision", type=str, help="evaluate at this precision", choices=["full", "autocast"], default="autocast")
parser.add_argument("--share", action='store_true', help="use share=True for gradio and make the UI accessible through their site")
parser.add_argument("--ngrok", type=str, help="ngrok authtoken, alternative to gradio --share", default=None)
parser.add_argument("--ngrok-region", type=str, help="The region in which ngrok should start.", default="us")
parser.add_argument("--enable-insecure-extension-access", action='store_true', help="enable extensions tab regardless of other options")
parser.add_argument("--codeformer-models-path", type=str, help="Path to directory with codeformer model file(s).", default=os.path.join(models_path, 'Codeformer'))
parser.add_argument("--gfpgan-models-path", type=str, help="Path to directory with GFPGAN model file(s).", default=os.path.join(models_path, 'GFPGAN'))
parser.add_argument("--esrgan-models-path", type=str, help="Path to directory with ESRGAN model file(s).", default=os.path.join(models_path, 'ESRGAN'))
parser.add_argument("--bsrgan-models-path", type=str, help="Path to directory with BSRGAN model file(s).", default=os.path.join(models_path, 'BSRGAN'))
parser.add_argument("--realesrgan-models-path", type=str, help="Path to directory with RealESRGAN model file(s).", default=os.path.join(models_path, 'RealESRGAN'))
parser.add_argument("--scunet-models-path", type=str, help="Path to directory with ScuNET model file(s).", default=os.path.join(models_path, 'ScuNET'))
parser.add_argument("--swinir-models-path", type=str, help="Path to directory with SwinIR model file(s).", default=os.path.join(models_path, 'SwinIR'))
parser.add_argument("--ldsr-models-path", type=str, help="Path to directory with LDSR model file(s).", default=os.path.join(models_path, 'LDSR'))
parser.add_argument("--clip-models-path", type=str, help="Path to directory with CLIP model file(s).", default=None)
parser.add_argument("--xformers", action='store_true', help="enable xformers for cross attention layers")
parser.add_argument("--force-enable-xformers", action='store_true', help="enable xformers for cross attention layers regardless of whether the checking code thinks you can run it; do not make bug reports if this fails to work")
parser.add_argument("--deepdanbooru", action='store_true', help="enable deepdanbooru interrogator")
parser.add_argument("--opt-split-attention", action='store_true', help="force-enables Doggettx's cross-attention layer optimization. By default, it's on for torch cuda.")
parser.add_argument("--opt-split-attention-invokeai", action='store_true', help="force-enables InvokeAI's cross-attention layer optimization. By default, it's on when cuda is unavailable.")
parser.add_argument("--opt-split-attention-v1", action='store_true', help="enable older version of split attention optimization that does not consume all the VRAM it can find")
parser.add_argument("--disable-opt-split-attention", action='store_true', help="force-disables cross-attention layer optimization")
parser.add_argument("--use-cpu", nargs='+',choices=['all', 'sd', 'interrogate', 'gfpgan', 'swinir', 'esrgan', 'scunet', 'codeformer'], help="use CPU as torch device for specified modules", default=[], type=str.lower)
parser.add_argument("--listen", action='store_true', help="launch gradio with 0.0.0.0 as server name, allowing to respond to network requests")
parser.add_argument("--port", type=int, help="launch gradio with given server port, you need root/admin rights for ports < 1024, defaults to 7860 if available", default=None)
parser.add_argument("--show-negative-prompt", action='store_true', help="does not do anything", default=False)
parser.add_argument("--ui-config-file", type=str, help="filename to use for ui configuration", default=os.path.join(script_path, 'ui-config.json'))
parser.add_argument("--hide-ui-dir-config", action='store_true', help="hide directory configuration from webui", default=False)
parser.add_argument("--freeze-settings", action='store_true', help="disable editing settings", default=False)
parser.add_argument("--ui-settings-file", type=str, help="filename to use for ui settings", default=os.path.join(script_path, 'config.json'))
parser.add_argument("--gradio-debug", action='store_true', help="launch gradio with --debug option")
parser.add_argument("--gradio-auth", type=str, help='set gradio authentication like "username:password"; or comma-delimit multiple like "u1:p1,u2:p2,u3:p3"', default=None)
parser.add_argument("--gradio-img2img-tool", type=str, help='gradio image uploader tool: can be either editor for ctopping, or color-sketch for drawing', choices=["color-sketch", "editor"], default="editor")
parser.add_argument("--opt-channelslast", action='store_true', help="change memory type for stable diffusion to channels last")
parser.add_argument("--styles-file", type=str, help="filename to use for styles", default=os.path.join(script_path, 'styles.csv'))
parser.add_argument("--autolaunch", action='store_true', help="open the webui URL in the system's default browser upon launch", default=False)
parser.add_argument("--theme", type=str, help="launches the UI with light or dark theme", default=None)
parser.add_argument("--use-textbox-seed", action='store_true', help="use textbox for seeds in UI (no up/down, but possible to input long seeds)", default=False)
parser.add_argument("--disable-console-progressbars", action='store_true', help="do not output progressbars to console", default=False)
parser.add_argument("--enable-console-prompts", action='store_true', help="print prompts to console when generating with txt2img and img2img", default=False)
parser.add_argument('--vae-path', type=str, help='Path to Variational Autoencoders model', default=None)
parser.add_argument("--disable-safe-unpickle", action='store_true', help="disable checking pytorch models for malicious code", default=False)
parser.add_argument("--api", action='store_true', help="use api=True to launch the api with the webui")
parser.add_argument("--nowebui", action='store_true', help="use api=True to launch the api instead of the webui")
parser.add_argument("--ui-debug-mode", action='store_true', help="Don't load model to quickly launch UI")
parser.add_argument("--device-id", type=str, help="Select the default CUDA device to use (export CUDA_VISIBLE_DEVICES=0,1,etc might be needed before)", default=None)
parser.add_argument("--administrator", action='store_true', help="Administrator rights", default=False)
parser.add_argument("--cors-allow-origins", type=str, help="Allowed CORS origins", default=None)
parser.add_argument("--tls-keyfile", type=str, help="Partially enables TLS, requires --tls-certfile to fully function", default=None)
parser.add_argument("--tls-certfile", type=str, help="Partially enables TLS, requires --tls-keyfile to fully function", default=None)
parser.add_argument("--server-name", type=str, help="Sets hostname of server", default=None)
script_loading.preload_extensions(extensions.extensions_dir, parser)
cmd_opts = parser.parse_args()
restricted_opts = {
"samples_filename_pattern",
"directories_filename_pattern",
"outdir_samples",
"outdir_txt2img_samples",
"outdir_img2img_samples",
"outdir_extras_samples",
"outdir_grids",
"outdir_txt2img_grids",
"outdir_save",
}
cmd_opts.disable_extension_access = (cmd_opts.share or cmd_opts.listen) and not cmd_opts.enable_insecure_extension_access
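# one torch device per module: CPU when the module name (or 'all') is listed in --use-cpu, otherwise the best available device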
devices.device, devices.device_interrogate, devices.device_gfpgan, devices.device_swinir, devices.device_esrgan, devices.device_scunet, devices.device_codeformer = \
(devices.cpu if any(y in cmd_opts.use_cpu for y in [x, 'all']) else devices.get_optimal_device() for x in ['sd', 'interrogate', 'gfpgan', 'swinir', 'esrgan', 'scunet', 'codeformer'])
device = devices.device
weight_load_location = None if cmd_opts.lowram else "cpu"
batch_cond_uncond = cmd_opts.always_batch_cond_uncond or not (cmd_opts.lowvram or cmd_opts.medvram)
parallel_processing_allowed = not cmd_opts.lowvram and not cmd_opts.medvram
xformers_available = False
config_filename = cmd_opts.ui_settings_file
os.makedirs(cmd_opts.hypernetwork_dir, exist_ok=True)
hypernetworks = hypernetwork.list_hypernetworks(cmd_opts.hypernetwork_dir)
loaded_hypernetwork = None
def reload_hypernetworks():
global hypernetworks
hypernetworks = hypernetwork.list_hypernetworks(cmd_opts.hypernetwork_dir)
hypernetwork.load_hypernetwork(opts.sd_hypernetwork)
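# State tracks the currently running generation job for the UI/API: skip/interrupt flags, step counters and the live preview image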
class State:
skipped = False
interrupted = False
job = ""
job_no = 0
job_count = 0
job_timestamp = '0'
sampling_step = 0
sampling_steps = 0
current_latent = None
current_image = None
current_image_sampling_step = 0
textinfo = None
time_start = None
need_restart = False
def skip(self):
self.skipped = True
def interrupt(self):
self.interrupted = True
def nextjob(self):
if opts.show_progress_every_n_steps == -1:
self.do_set_current_image()
self.job_no += 1
self.sampling_step = 0
self.current_image_sampling_step = 0
def dict(self):
obj = {
"skipped": self.skipped,
"interrupted": self.skipped,
"job": self.job,
"job_count": self.job_count,
"job_no": self.job_no,
"sampling_step": self.sampling_step,
"sampling_steps": self.sampling_steps,
}
return obj
def begin(self):
self.sampling_step = 0
self.job_count = -1
self.job_no = 0
self.job_timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
self.current_latent = None
self.current_image = None
self.current_image_sampling_step = 0
self.skipped = False
self.interrupted = False
self.textinfo = None
self.time_start = time.time()
devices.torch_gc()
def end(self):
self.job = ""
self.job_count = 0
devices.torch_gc()
"""sets self.current_image from self.current_latent if enough sampling steps have been made after the last call to this"""
def set_current_image(self):
if self.sampling_step - self.current_image_sampling_step >= opts.show_progress_every_n_steps and opts.show_progress_every_n_steps > 0:
self.do_set_current_image()
def do_set_current_image(self):
if not parallel_processing_allowed:
return
if self.current_latent is None:
return
if opts.show_progress_grid:
self.current_image = sd_samplers.samples_to_image_grid(self.current_latent)
else:
self.current_image = sd_samplers.sample_to_image(self.current_latent)
self.current_image_sampling_step = self.sampling_step
state = State()
artist_db = modules.artists.ArtistsDatabase(os.path.join(script_path, 'artists.csv'))
styles_filename = cmd_opts.styles_file
prompt_styles = modules.styles.StyleDatabase(styles_filename)
interrogator = modules.interrogate.InterrogateModels("interrogate")
face_restorers = []
def realesrgan_models_names():
import modules.realesrgan_model
return [x.name for x in modules.realesrgan_model.get_realesrgan_models(None)]
class OptionInfo:
def __init__(self, default=None, label="", component=None, component_args=None, onchange=None, section=None, refresh=None):
self.default = default
self.label = label
self.component = component
self.component_args = component_args
self.onchange = onchange
self.section = section
self.refresh = refresh
def options_section(section_identifier, options_dict):
for k, v in options_dict.items():
v.section = section_identifier
return options_dict
hide_dirs = {"visible": not cmd_opts.hide_ui_dir_config}
options_templates = {}
options_templates.update(options_section(('saving-images', "Saving images/grids"), {
"samples_save": OptionInfo(True, "Always save all generated images"),
"samples_format": OptionInfo('png', 'File format for images'),
"samples_filename_pattern": OptionInfo("", "Images filename pattern", component_args=hide_dirs),
"save_images_add_number": OptionInfo(True, "Add number to filename when saving", component_args=hide_dirs),
"grid_save": OptionInfo(True, "Always save all generated image grids"),
"grid_format": OptionInfo('png', 'File format for grids'),
"grid_extended_filename": OptionInfo(False, "Add extended info (seed, prompt) to filename when saving grid"),
"grid_only_if_multiple": OptionInfo(True, "Do not save grids consisting of one picture"),
"grid_prevent_empty_spots": OptionInfo(False, "Prevent empty spots in grid (when set to autodetect)"),
"n_rows": OptionInfo(-1, "Grid row count; use -1 for autodetect and 0 for it to be same as batch size", gr.Slider, {"minimum": -1, "maximum": 16, "step": 1}),
"enable_pnginfo": OptionInfo(True, "Save text information about generation parameters as chunks to png files"),
"save_txt": OptionInfo(False, "Create a text file next to every image with generation parameters."),
"save_images_before_face_restoration": OptionInfo(False, "Save a copy of image before doing face restoration."),
"save_images_before_highres_fix": OptionInfo(False, "Save a copy of image before applying highres fix."),
"save_images_before_color_correction": OptionInfo(False, "Save a copy of image before applying color correction to img2img results"),
"jpeg_quality": OptionInfo(80, "Quality for saved jpeg images", gr.Slider, {"minimum": 1, "maximum": 100, "step": 1}),
"export_for_4chan": OptionInfo(True, "If PNG image is larger than 4MB or any dimension is larger than 4000, downscale and save copy as JPG"),
"use_original_name_batch": OptionInfo(False, "Use original name for output filename during batch process in extras tab"),
"save_selected_only": OptionInfo(True, "When using 'Save' button, only save a single selected image"),
"do_not_add_watermark": OptionInfo(False, "Do not add watermark to images"),
}))
options_templates.update(options_section(('saving-paths', "Paths for saving"), {
"outdir_samples": OptionInfo("", "Output directory for images; if empty, defaults to three directories below", component_args=hide_dirs),
"outdir_txt2img_samples": OptionInfo("outputs/txt2img-images", 'Output directory for txt2img images', component_args=hide_dirs),
"outdir_img2img_samples": OptionInfo("outputs/img2img-images", 'Output directory for img2img images', component_args=hide_dirs),
"outdir_extras_samples": OptionInfo("outputs/extras-images", 'Output directory for images from extras tab', component_args=hide_dirs),
"outdir_grids": OptionInfo("", "Output directory for grids; if empty, defaults to two directories below", component_args=hide_dirs),
"outdir_txt2img_grids": OptionInfo("outputs/txt2img-grids", 'Output directory for txt2img grids', component_args=hide_dirs),
"outdir_img2img_grids": OptionInfo("outputs/img2img-grids", 'Output directory for img2img grids', component_args=hide_dirs),
"outdir_save": OptionInfo("log/images", "Directory for saving images using the Save button", component_args=hide_dirs),
}))
options_templates.update(options_section(('saving-to-dirs', "Saving to a directory"), {
"save_to_dirs": OptionInfo(False, "Save images to a subdirectory"),
"grid_save_to_dirs": OptionInfo(False, "Save grids to a subdirectory"),
"use_save_to_dirs_for_ui": OptionInfo(False, "When using \"Save\" button, save images to a subdirectory"),
"directories_filename_pattern": OptionInfo("", "Directory name pattern", component_args=hide_dirs),
"directories_max_prompt_words": OptionInfo(8, "Max prompt words for [prompt_words] pattern", gr.Slider, {"minimum": 1, "maximum": 20, "step": 1, **hide_dirs}),
}))
options_templates.update(options_section(('upscaling', "Upscaling"), {
"ESRGAN_tile": OptionInfo(192, "Tile size for ESRGAN upscalers. 0 = no tiling.", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}),
"ESRGAN_tile_overlap": OptionInfo(8, "Tile overlap, in pixels for ESRGAN upscalers. Low values = visible seam.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}),
"realesrgan_enabled_models": OptionInfo(["R-ESRGAN x4+", "R-ESRGAN x4+ Anime6B"], "Select which Real-ESRGAN models to show in the web UI. (Requires restart)", gr.CheckboxGroup, lambda: {"choices": realesrgan_models_names()}),
"SWIN_tile": OptionInfo(192, "Tile size for all SwinIR.", gr.Slider, {"minimum": 16, "maximum": 512, "step": 16}),
"SWIN_tile_overlap": OptionInfo(8, "Tile overlap, in pixels for SwinIR. Low values = visible seam.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}),
"ldsr_steps": OptionInfo(100, "LDSR processing steps. Lower = faster", gr.Slider, {"minimum": 1, "maximum": 200, "step": 1}),
"upscaler_for_img2img": OptionInfo(None, "Upscaler for img2img", gr.Dropdown, lambda: {"choices": [x.name for x in sd_upscalers]}),
"use_scale_latent_for_hires_fix": OptionInfo(False, "Upscale latent space image when doing hires. fix"),
}))
options_templates.update(options_section(('face-restoration', "Face restoration"), {
"face_restoration_model": OptionInfo(None, "Face restoration model", gr.Radio, lambda: {"choices": [x.name() for x in face_restorers]}),
"code_former_weight": OptionInfo(0.5, "CodeFormer weight parameter; 0 = maximum effect; 1 = minimum effect", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}),
"face_restoration_unload": OptionInfo(False, "Move face restoration model from VRAM into RAM after processing"),
}))
options_templates.update(options_section(('system', "System"), {
"memmon_poll_rate": OptionInfo(8, "VRAM usage polls per second during generation. Set to 0 to disable.", gr.Slider, {"minimum": 0, "maximum": 40, "step": 1}),
"samples_log_stdout": OptionInfo(False, "Always print all generation info to standard output"),
"multiple_tqdm": OptionInfo(True, "Add a second progress bar to the console that shows progress for an entire job."),
}))
options_templates.update(options_section(('training', "Training"), {
"unload_models_when_training": OptionInfo(False, "Move VAE and CLIP to RAM when training if possible. Saves VRAM."),
"shuffle_tags": OptionInfo(False, "Shuffleing tags by ',' when create texts."),
"tag_drop_out": OptionInfo(0, "Dropout tags when create texts", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.1}),
"save_optimizer_state": OptionInfo(False, "Saves Optimizer state as separate *.optim file. Training can be resumed with HN itself and matching optim file."),
"dataset_filename_word_regex": OptionInfo("", "Filename word regex"),
"dataset_filename_join_string": OptionInfo(" ", "Filename join string"),
"training_image_repeats_per_epoch": OptionInfo(1, "Number of repeats for a single input image per epoch; used only for displaying epoch number", gr.Number, {"precision": 0}),
"training_write_csv_every": OptionInfo(500, "Save an csv containing the loss to log directory every N steps, 0 to disable"),
"training_xattention_optimizations": OptionInfo(False, "Use cross attention optimizations while training"),
}))
options_templates.update(options_section(('sd', "Stable Diffusion"), {
"sd_model_checkpoint": OptionInfo(None, "Stable Diffusion checkpoint", gr.Dropdown, lambda: {"choices": modules.sd_models.checkpoint_tiles()}, refresh=sd_models.list_models),
"sd_checkpoint_cache": OptionInfo(0, "Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}),
"sd_vae": OptionInfo("auto", "SD VAE", gr.Dropdown, lambda: {"choices": sd_vae.vae_list}, refresh=sd_vae.refresh_vae_list),
"sd_vae_as_default": OptionInfo(False, "Ignore selected VAE for stable diffusion checkpoints that have their own .vae.pt next to them"),
"sd_hypernetwork": OptionInfo("None", "Hypernetwork", gr.Dropdown, lambda: {"choices": ["None"] + [x for x in hypernetworks.keys()]}, refresh=reload_hypernetworks),
"sd_hypernetwork_strength": OptionInfo(1.0, "Hypernetwork strength", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.001}),
"inpainting_mask_weight": OptionInfo(1.0, "Inpainting conditioning mask strength", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
"img2img_color_correction": OptionInfo(False, "Apply color correction to img2img results to match original colors."),
"img2img_fix_steps": OptionInfo(False, "With img2img, do exactly the amount of steps the slider specifies (normally you'd do less with less denoising)."),
"enable_quantization": OptionInfo(False, "Enable quantization in K samplers for sharper and cleaner results. This may change existing seeds. Requires restart to apply."),
"enable_emphasis": OptionInfo(True, "Emphasis: use (text) to make model pay more attention to text and [text] to make it pay less attention"),
"use_old_emphasis_implementation": OptionInfo(False, "Use old emphasis implementation. Can be useful to reproduce old seeds."),
"enable_batch_seeds": OptionInfo(True, "Make K-diffusion samplers produce same images in a batch as when making a single image"),
"comma_padding_backtrack": OptionInfo(20, "Increase coherency by padding from the last comma within n tokens when using more than 75 tokens", gr.Slider, {"minimum": 0, "maximum": 74, "step": 1 }),
"filter_nsfw": OptionInfo(False, "Filter NSFW content"),
    'CLIP_stop_at_last_layers': OptionInfo(1, "Stop at last layers of CLIP model", gr.Slider, {"minimum": 1, "maximum": 12, "step": 1}),
"random_artist_categories": OptionInfo([], "Allowed categories for random artists selection when using the Roll button", gr.CheckboxGroup, {"choices": artist_db.categories()}),
}))
options_templates.update(options_section(('interrogate', "Interrogate Options"), {
"interrogate_keep_models_in_memory": OptionInfo(False, "Interrogate: keep models in VRAM"),
"interrogate_use_builtin_artists": OptionInfo(True, "Interrogate: use artists from artists.csv"),
"interrogate_return_ranks": OptionInfo(False, "Interrogate: include ranks of model tags matches in results (Has no effect on caption-based interrogators)."),
"interrogate_clip_num_beams": OptionInfo(1, "Interrogate: num_beams for BLIP", gr.Slider, {"minimum": 1, "maximum": 16, "step": 1}),
"interrogate_clip_min_length": OptionInfo(24, "Interrogate: minimum description length (excluding artists, etc..)", gr.Slider, {"minimum": 1, "maximum": 128, "step": 1}),
"interrogate_clip_max_length": OptionInfo(48, "Interrogate: maximum description length", gr.Slider, {"minimum": 1, "maximum": 256, "step": 1}),
"interrogate_clip_dict_limit": OptionInfo(1500, "CLIP: maximum number of lines in text file (0 = No limit)"),
"interrogate_deepbooru_score_threshold": OptionInfo(0.5, "Interrogate: deepbooru score threshold", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}),
"deepbooru_sort_alpha": OptionInfo(True, "Interrogate: deepbooru sort alphabetically"),
"deepbooru_use_spaces": OptionInfo(False, "use spaces for tags in deepbooru"),
"deepbooru_escape": OptionInfo(True, "escape (\\) brackets in deepbooru (so they are used as literal brackets and not for emphasis)"),
}))
options_templates.update(options_section(('ui', "User interface"), {
"show_progressbar": OptionInfo(True, "Show progressbar"),
"show_progress_every_n_steps": OptionInfo(0, "Show image creation progress every N sampling steps. Set to 0 to disable. Set to -1 to show after completion of batch.", gr.Slider, {"minimum": -1, "maximum": 32, "step": 1}),
"show_progress_grid": OptionInfo(True, "Show previews of all images generated in a batch as a grid"),
"return_grid": OptionInfo(True, "Show grid in results for web"),
"do_not_show_images": OptionInfo(False, "Do not show any images in results for web"),
"add_model_hash_to_info": OptionInfo(True, "Add model hash to generation information"),
"add_model_name_to_info": OptionInfo(False, "Add model name to generation information"),
"disable_weights_auto_swap": OptionInfo(False, "When reading generation parameters from text into UI (from PNG info or pasted text), do not change the selected model/checkpoint."),
"send_seed": OptionInfo(True, "Send seed when sending prompt or image to other interface"),
"font": OptionInfo("", "Font for image grids that have text"),
"js_modal_lightbox": OptionInfo(True, "Enable full page image viewer"),
"js_modal_lightbox_initially_zoomed": OptionInfo(True, "Show images zoomed in by default in full page image viewer"),
"show_progress_in_title": OptionInfo(True, "Show generation progress in window title."),
'quicksettings': OptionInfo("sd_model_checkpoint", "Quicksettings list"),
'localization': OptionInfo("None", "Localization (requires restart)", gr.Dropdown, lambda: {"choices": ["None"] + list(localization.localizations.keys())}, refresh=lambda: localization.list_localizations(cmd_opts.localizations_dir)),
}))
options_templates.update(options_section(('sampler-params', "Sampler parameters"), {
"hide_samplers": OptionInfo([], "Hide samplers in user interface (requires restart)", gr.CheckboxGroup, lambda: {"choices": [x.name for x in sd_samplers.all_samplers]}),
"eta_ddim": OptionInfo(0.0, "eta (noise multiplier) for DDIM", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
"eta_ancestral": OptionInfo(1.0, "eta (noise multiplier) for ancestral samplers", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
"ddim_discretize": OptionInfo('uniform', "img2img DDIM discretize", gr.Radio, {"choices": ['uniform', 'quad']}),
's_churn': OptionInfo(0.0, "sigma churn", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
's_tmin': OptionInfo(0.0, "sigma tmin", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
's_noise': OptionInfo(1.0, "sigma noise", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
'eta_noise_seed_delta': OptionInfo(0, "Eta noise seed delta", gr.Number, {"precision": 0}),
}))
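# Options registered under a None section key are persisted in the config file but kept off the settings page.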
options_templates.update(options_section((None, "Hidden options"), {
"disabled_extensions": OptionInfo([], "Disable those extensions"),
}))
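# Options wraps all templates registered above: values are exposed as attributes
# (e.g. opts.jpeg_quality), assignments are validated, and settings round-trip to JSON.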
class Options:
data = None
data_labels = options_templates
typemap = {int: float}
def __init__(self):
self.data = {k: v.default for k, v in self.data_labels.items()}
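    # Attribute writes go into self.data; they are refused when settings are frozen,
    # when the option is hidden (component_args visible=False), or when it is in
    # restricted_opts while cmd_opts.hide_ui_dir_config is set.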
def __setattr__(self, key, value):
if self.data is not None:
if key in self.data or key in self.data_labels:
assert not cmd_opts.freeze_settings, "changing settings is disabled"
info = opts.data_labels.get(key, None)
comp_args = info.component_args if info else None
if isinstance(comp_args, dict) and comp_args.get('visible', True) is False:
raise RuntimeError(f"not possible to set {key} because it is restricted")
if cmd_opts.hide_ui_dir_config and key in restricted_opts:
raise RuntimeError(f"not possible to set {key} because it is restricted")
self.data[key] = value
return
return super(Options, self).__setattr__(key, value)
def __getattr__(self, item):
if self.data is not None:
if item in self.data:
return self.data[item]
if item in self.data_labels:
return self.data_labels[item].default
return super(Options, self).__getattribute__(item)
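    # Write the current settings to a JSON file; refused when cmd_opts.freeze_settings is set.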
def save(self, filename):
assert not cmd_opts.freeze_settings, "saving settings is disabled"
with open(filename, "w", encoding="utf8") as file:
json.dump(self.data, file, indent=4)
def same_type(self, x, y):
if x is None or y is None:
return True
type_x = self.typemap.get(type(x), type(x))
type_y = self.typemap.get(type(y), type(y))
return type_x == type_y
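    # Read settings from a JSON file, warning about values whose type no longer matches the default.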
def load(self, filename):
with open(filename, "r", encoding="utf8") as file:
self.data = json.load(file)
bad_settings = 0
for k, v in self.data.items():
info = self.data_labels.get(k, None)
if info is not None and not self.same_type(info.default, v):
print(f"Warning: bad setting value: {k}: {v} ({type(v).__name__}; expected {type(info.default).__name__})", file=sys.stderr)
bad_settings += 1
if bad_settings > 0:
print(f"The program is likely to not work with bad settings.\nSettings file: {filename}\nEither fix the file, or delete it and restart.", file=sys.stderr)
def onchange(self, key, func, call=True):
item = self.data_labels.get(key)
item.onchange = func
if call:
func()
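    # Serialize every known setting (falling back to defaults) to a JSON string.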
def dumpjson(self):
d = {k: self.data.get(k, self.data_labels.get(k).default) for k in self.data_labels.keys()}
return json.dumps(d)
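    # Register an additional option definition at runtime.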
def add_option(self, key, info):
self.data_labels[key] = info
def reorder(self):
"""reorder settings so that all items related to section always go together"""
section_ids = {}
settings_items = self.data_labels.items()
for k, item in settings_items:
if item.section not in section_ids:
section_ids[item.section] = len(section_ids)
self.data_labels = {k: v for k, v in sorted(settings_items, key=lambda x: section_ids[x[1].section])}
opts = Options()
if os.path.exists(config_filename):
opts.load(config_filename)
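# Module-level handles filled in elsewhere at runtime (upscaler discovery, model loading).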
sd_upscalers = []
sd_model = None
clip_model = None
progress_print_out = sys.stdout
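# Secondary console progress bar covering the whole job queue; active only when
# opts.multiple_tqdm is enabled and console progress bars are not disabled.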
class TotalTQDM:
def __init__(self):
self._tqdm = None
def reset(self):
self._tqdm = tqdm.tqdm(
desc="Total progress",
total=state.job_count * state.sampling_steps,
position=1,
file=progress_print_out
)
def update(self):
if not opts.multiple_tqdm or cmd_opts.disable_console_progressbars:
return
if self._tqdm is None:
self.reset()
self._tqdm.update()
def updateTotal(self, new_total):
if not opts.multiple_tqdm or cmd_opts.disable_console_progressbars:
return
if self._tqdm is None:
self.reset()
        self._tqdm.total = new_total
def clear(self):
if self._tqdm is not None:
self._tqdm.close()
self._tqdm = None
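# Shared instances: the overall progress bar and a VRAM usage monitor started at import time.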
total_tqdm = TotalTQDM()
mem_mon = modules.memmon.MemUsageMonitor("MemMon", device, opts)
mem_mon.start()
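# Return the non-hidden regular files directly inside dirname, sorted by name.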
def listfiles(dirname):
filenames = [os.path.join(dirname, x) for x in sorted(os.listdir(dirname)) if not x.startswith(".")]
return [file for file in filenames if os.path.isfile(file)]
<|code_end|>
|