summaryrefslogtreecommitdiff
path: root/image.py
diff options
context:
space:
mode:
authorBen Connors <benconnors@outlook.com>2019-01-24 21:14:02 -0500
committerBen Connors <benconnors@outlook.com>2019-01-24 21:14:02 -0500
commit9b3230d72efc51a504edf102090c219e9dd35f99 (patch)
tree0c6ecfc4c26afda25eda66d5ccdceaf929ebf4d2 /image.py
parentfff5e34c9864532b5e38e70b658eccb0ff35d1d3 (diff)
Move stuff around; fix rendering
- Now actually able to render shows - Maybe able to render chasers - Add untested OLA lighting output - General fixes
Diffstat (limited to 'image.py')
-rwxr-xr-ximage.py181
1 files changed, 0 insertions, 181 deletions
diff --git a/image.py b/image.py
deleted file mode 100755
index 6e47332..0000000
--- a/image.py
+++ /dev/null
@@ -1,181 +0,0 @@
-#!/usr/bin/env python3
-
-"""This module contains functions for visualizing QLC+ functions as images.
-
-For audio support, ffmpeg must be available on the PATH; otherwise audio is disabled with a warning.
-"""
-
-import subprocess as subp
-import tempfile
-import wave
-import os
-import warnings
-from shutil import which
-
-from PIL import Image, ImageDraw, ImageFont
-from .workspace import Show, Function, SHOW
-
-BLUE = (0,0,255)
-RED = (255,0,0)
-WHITE = (255,255,255)
-GRAY = (127,127,127)
-GREEN = (0, 255, 0)
-BLACK = (0,0,0)
-YELLOW = (255,255,0)
-BAMBER = (255,127,0)
-CBLUE = (127,127,255)
-NOGEL = (255,255,127)
-PURPLE = (255,0,255)
-
def get_wave(fname):
    """Decode an audio file to 16-bit 44.1 kHz WAV and open it for reading.

    Uses ffmpeg to transcode *fname* into a temporary WAV file.

    Returns a ``(tempfile_name, wave.Wave_read)`` pair; the caller is
    responsible for closing the wave object and removing the temp file.

    Raises ``subprocess.CalledProcessError`` if ffmpeg fails.
    """
    ## delete=False so the file survives the with-block for ffmpeg/wave to use.
    with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as f:
        tname = f.name
    try:
        ## check_call raises on a non-zero ffmpeg exit; the previous call()
        ## ignored failures and then crashed confusingly inside wave.open.
        subp.check_call(["ffmpeg", "-i", fname, "-acodec", "pcm_s16le",
                         "-ar", "44100", "-y", tname],
                        stdout=subp.DEVNULL, stderr=subp.DEVNULL)
        return tname, wave.open(tname, mode="rb")
    except Exception:
        ## Don't leak the temp file when decoding or opening fails.
        os.remove(tname)
        raise
-
def chunks(l, n):
    """Yield successive pieces of *l*, each holding at most *n* elements."""
    start = 0
    total = len(l)
    while start < total:
        yield l[start:start + n]
        start += n
-
def s16letoi(lsb, msb):
    """Convert S16LE (2's complement) sample bytes to a Python int.

    *lsb* and *msb* are the low and high bytes (0-255) of one little-endian
    signed 16-bit sample; returns a value in [-32768, 32767].
    """
    val = (msb << 8) | lsb
    ## BUG FIX: the old code tested msb & 0b1000000 (bit 6, value 64) for the
    ## sign; the sign bit of a 16-bit sample is bit 7 of the high byte
    ## (0b10000000), i.e. val >= 0x8000.
    if val >= 0x8000:  ## Negative
        return val - 0x10000
    return val
-
def render_image(f:Function):
    """Render a QLC+ function.

    This function delegates appropriately, based on the type of function.
    """
    ## Guard clause: shows are currently the only renderable function type.
    if f.type != SHOW:
        raise ValueError("Don't know how to render %s" % f.type)
    return render_image_show(f)
-
def render_image_show(s:Show):
    """Render a show to a PIL image.

    Draws one horizontal band per DMX channel used by the show, plotting the
    channel's level over time (1 pixel per 10 ms, left to right), plus a
    bottom band labelled "A" showing the audio amplitude envelope (when
    ffmpeg is available).  Returns the PIL Image.
    """
    vals, acues = s.render_all()
    vals = list(vals)

    ## Collect every channel that appears anywhere in the show; every channel
    ## starts at level 0.
    values = {}
    for t,cv in vals:
        for c, v in cv:
            if c not in values:
                values[c] = 0

    cheight = 270    ## vertical pixels per channel band
    offset = 200     ## left margin reserved for channel labels
    numheight = 100  ## top strip reserved for the seconds ruler

    ## One pixel column per 10 ms, up to the timestamp of the last event;
    ## one extra band (the "+1") for the audio envelope.
    width = int(vals[-1][0]/10 + 1)+offset
    height = (len(values)+1)*cheight+numheight

    ## Flip y so larger values are drawn higher up in the image.
    ah = lambda y: height-1-y

    font = ImageFont.truetype(font="arial.ttf", size=int(offset*0.8))
    nfont = ImageFont.truetype(font="arial.ttf", size=int(numheight*0.8))

    ## Trace color per channel number; presumably matches the gel colors of
    ## the corresponding fixtures -- TODO confirm against the rig.
    colormap = {7:RED, 8:RED, 9:RED, 19: BAMBER, 12: CBLUE, 24: NOGEL, 4: BLUE, 5: BLUE, 6: BLUE,
            25: GREEN, 26: GREEN, 27: GREEN, 13: YELLOW, 14: YELLOW, 15: YELLOW, 16: YELLOW,
            18: PURPLE, 22: PURPLE, 21: PURPLE}

    ## Top-to-bottom display order of the channel bands.
    order = [7,8,9,25,26,27,4,5,6,12,24,19,13,14,15,16,18,22,21]

    im = Image.new("RGB", (width, height), color=WHITE)
    draw = ImageDraw.Draw(im)
    ## Band labels: the used channels in display order, plus "A" for audio.
    channels = [i for i in order if i in values]+["A"]
    ## Horizontal separator line and label for every band.
    for c in range(len(values)+1):
        draw.line(((0, ah(c*cheight)), (width-1,ah(c*cheight))), fill=GRAY, width=1)
        draw.text((0,ah(c*cheight + 200)), text=str(channels[c]), font=font, fill=GRAY)
    channels = channels[:-1]  ## drop "A": the audio band is drawn separately below

    ## Vertical line closing the label margin; thick baseline under the ruler.
    draw.line(((offset-1,0),(offset-1,height-1)), fill=GRAY, width=1)
    draw.line(((0,numheight-1), (width-1, numheight-1)), fill=GRAY, width=3)

    ## atime[i] = mean absolute sample amplitude of the i-th 10 ms slice.
    atime = []

    if which("ffmpeg") is None:
        warnings.warn("Cannot find ffmpeg, audio is disabled", RuntimeWarning)
    else:
        for a in acues:
            stime = a[0]  ## cue start time; presumably ms -- TODO confirm

            ## Number of 10 ms periods elapsed before this cue starts.
            skipperiods = stime//10 + (stime%10 > 0)

            ## Add 0 entries as necessary to the audio list
            ## NOTE(review): local name "wave" shadows the wave module.
            atime += [0]*max(0,skipperiods-len(atime))

            tname, wave = get_wave(a[2])
            nchannels, sampwidth, framerate, nframes, *_ = wave.getparams()
            if sampwidth != 2:
                raise ValueError("Only 16-bit wave is supported")
            skip = framerate//100  ## frames per 10 ms slice
            if stime%10 > 0:
                ## Consume the partial period before the first full slice.
                wave.readframes((stime%10)*(skip//10))
            for n,t in enumerate(range(0,nframes, skip), skipperiods):
                ## Average |amplitude| of the first channel over this slice,
                ## sampling 10-frame bursts every skip//10 frames.
                asum = 0
                count = 0
                for i in range(skip//10):
                    for c in chunks(wave.readframes(10)[:nchannels*sampwidth],sampwidth):
                        asum += abs(s16letoi(*c))
                        count += 1
                if skip%10 > 0:
                    wave.readframes(skip%10)  ## discard leftover frames of the slice
                if not count:
                    break  ## ran out of audio frames
                if len(atime) > n:
                    ## Overlap with an earlier cue: mix by summing, clamped
                    ## to the 16-bit maximum.
                    atime[n] = min(atime[n]+asum/count, 32767)
                else:
                    atime.append(asum/count)

            wave.close()
            os.remove(tname)
            ## NOTE(review): break means only the FIRST audio cue is ever
            ## rendered -- confirm this is intended.
            break

    t = 0
    while vals:
        ## Apply all value changes that occurred before this column.
        ## NOTE(review): strict "< t" defers events at exactly t by one
        ## 10 ms column -- confirm intended.
        while vals and vals[0][0] < t:
            _, cv = vals.pop(0)
            values.update(cv)
        curx = t//10 + offset
        ## Ruler: thin line every 500 ms, thick line + seconds label every 2 s.
        if t % 500 == 0:
            draw.line(((curx+(t%2000==0),0), (curx+(t%2000==0), height-1)), fill=GRAY, width = 1 + 3*(t % 2000 == 0))
            if t % 2000 == 0:
                draw.text((curx+1, 5), text=str(t//1000), font=nfont, fill=GRAY)
        ## One vertical bar per channel, height proportional to its level.
        for n,c in enumerate(channels):
            v = values[c]
            cury = ah(n*cheight+1+v)
            fill = colormap[c]
            draw.point((curx, cury), fill=fill)
            draw.line(((curx,ah(n*cheight+1)),(curx, cury)), fill=fill, width=1)
        ## Audio envelope: bar mirrored about the band's center line, scaled
        ## from 16-bit amplitude down to pixels (/256).
        if atime:
            aval = abs(int(atime.pop(0)/256))
            draw.line([(curx, ah(len(values)*cheight+128-aval)),(curx, ah(len(values)*cheight+128+aval))], fill=BLACK, width=1)
        t += 10

    return im
-
if __name__ == "__main__":
    import argparse

    from .workspace import Workspace

    ## Command-line front end: workspace file, show ID, output image path.
    argp = argparse.ArgumentParser(description="Render a show to an image.")
    argp.add_argument("workspace", type=str, help="Workspace file")
    argp.add_argument("show_id", type=int, help="Show ID")
    argp.add_argument("output", type=str, help="Output file")
    opts = argp.parse_args()

    print("Loading workspace...")
    ws = Workspace.load(opts.workspace)
    show = ws.functions[opts.show_id]

    print("Rendering show \"%s\"..." % show.name)
    render_image(show).save(opts.output)
    print("Done!")