visuals

spiral

just some hypnotic graphics...

the javascript code for the animation above is this...

<div style="background-color:black;">
<canvas id="can" width="800" height="600"></canvas>
<script>
var width, height;
var ctx, frameCount= 0;
(function() {
    var can= document.getElementById('can');
    ctx= can.getContext('2d');
    width= can.width;
    height= can.height;
    ctx.fillStyle= '#FFF';
    window.requestAnimationFrame(draw);
})();
function draw() {
    ctx.clearRect(0, 0, width, height);
    ctx.save();
    ctx.translate(width*0.5, height*0.5);
    ctx.beginPath();
    var theta= Math.sin(frameCount*0.001)*Math.PI*2*4;
    for(var y= 0; y<height; y++) {
        for(var i= 0; i<10; i++) {
            ctx.rotate(theta*0.001);
            ctx.fillRect((Math.sin(y*0.1+theta+(i*2))*100), y, 2, 2);
        }
    }
    ctx.restore();
    frameCount= frameCount+1;
    window.requestAnimationFrame(draw);
}
</script>
</div>

originally this was a quick sketch made in processing...

//spiral.pde - processing
void setup() {
  size(800, 600);
  noStroke();
}
void draw() {
  background(0);
  translate(width*0.5, height*0.5);
  float theta= sin(frameCount*0.001)*TWO_PI*4;
  for(int y= 0; y<height; y++) {
    for(int i= 0; i<10; i++) {
      rotate(theta*0.001);
      rect((sin(y*0.1+theta+(i*2))*100), y, 2, 2);
    }
  }
}

and then ported to supercollider...

//spiral.scd - supercollider
(
var width= 800, height= 600;
var win= Window("spiral", Rect(100, 100, width, height), false);
var usr= UserView(win, Rect(0, 0, width, height));
usr.background= Color.black;
usr.animate= true;
usr.drawFunc= {
        var theta= sin(usr.frame*0.001)*2pi*4;
        Pen.fillColor= Color.white;
        Pen.translate(width*0.5, height*0.5);
        height.do{|y|
                10.do{|i|
                        Pen.rotate(theta*0.001);
                        Pen.fillRect(Rect(sin(y*0.1+theta+(i*2))*100, y, 2, 2));
                };
        };
};
CmdPeriod.doOnce({win.close});
win.front;
)

more processing tweets

three new processing tweets...

//--0021

int s=900,i;void setup(){size(1200,s);strokeWeight(99);}void draw(){stroke(9,9);ellipse(i++%1200,millis()%750+99,i%s/350.,(20+i)%99);}// #p5

//--0022

int j,i;void setup(){size(1024,768);}void draw(){translate(512,384);i=frameCount;while(i-->1){rect(j++%i,j%i,3,i/9%9);rotate(0.009);}}// #p5

//--0023

float j=433,i=9e3;size(1024,768,P3D);fill(9,99);beginShape();while(i>0){vertex(sin(i--/99)*j+j,sin(i/j/8)*j*2,cos(i*2)*j);}endShape();// #p5


and more previously

p5 tweets

constraints - i love them. inspired by abe's twitter experiments, i've also played with creating small one-line processing programs that are 140 characters long.

below is a video of number 0002, the twitter code tweets and screenshots. note that many but not all are animations. copy and paste the lines into processing (2.0) to try them out.

p5tweet0002 from redFrik on Vimeo.

//--0001

int i;noStroke();size(999,900);for(i=0;i<999;i++){fill(255,0,0,9);rect(i%99,i,i,i);}for(i=0;i<999;i++){fill(0,200,0,3);rect(i,i,i,i);}// #p5

//--0002

int j,i=0;void setup(){size(1200,900,P3D);frameRate(999);}void draw(){for(j=0;j<99;)rect(i++%(1199-j++),int(i/99)%(999-j),i%12,j%16);}// #p5

//--0003

int s=900,j,i=j=0;void setup(){size(s,s);fill(0,9);textSize(80);}void draw(){text(i+j,(sin(i++)/3+0.3)*s,(cos(j+++(i/4e3))/3+0.5)*s);}// #p5

//--0004

int s=900,j,i=j=0;void setup(){size(s,s);stroke(255,9);fill(9,3);}void draw(){quad(i++,j++,j,i,s-i,i-50,s-j,j);i=(i<<j%4)%1200;j=j%s;}// #p5

//--0005

int s=900,i=0;void setup(){size(s,s,P3D);stroke(255,0,0);fill(10,4);}void draw(){translate(i++%s,s/2);rotate(float(i)/s);sphere(i%s);}// #p5

//--0006

int s=900;float i=0;void setup(){size(s,s,P3D);stroke(99,9);fill(0,2);}void draw(){translate(i++%s,s/2);rotate(cos(i/50));box(i%s/3);}// #p5

//--0007

background(0);noStroke();for(float i=0;i<99;i=i+0.0252){size(1200,900,P3D);fill(255,0,0,60);translate(i+9,i);rotate(i*1.8);sphere(i);}// #p5

//--0008

void setup(){size(1600,900);background(255);}void draw(){textSize(millis()%1200);fill(second()*4,0,0,second());text(millis(),10,880);}// #p5

//--0009

float j,i=0;void setup(){size(1200,900,P3D);}void draw(){for(j=0;j<133;j++){rect(9*j+1,sin((i+++j)*0.75/cos(j/99)/5e3)*99+450,9,9);};}// #p5

//--0010

float i,k=450;void setup(){size(900,900,P3D);textSize(k);}void draw(){translate(k,k);fill(i%1*k/2,60);rotate(i+=+.01);text("$",99,0);}// #p5

//--0011

int i,j,k=1200;void setup(){size(k,900);fill(255,200);}void draw(){background(0);for(i=0;i<8e3;)text(i++*j/k%k,i%131*9,i/131*16);j++;}// #p5

//--0012

int j=200,i=900;size(j*6,i,P3D);lights();translate(700,540);for(i=1;i<j;){fill(i/2,50);rotate(j/i);translate(i,i,-2.7);sphere(i+++j);}// #p5

//--0013

int j=480,i=900;size(j*3,i,P3D);noStroke();lights();translate(660,j);for(i=1;i<j;){fill(i,0,0,10);rotate(i/4e4,1.1,2.2,3.3);box(i++);}// #p5

//--0014

int s=900,i=0;void setup(){size(1200,s,P3D);}void draw(){translate(600,450);rotateX(i*.0021);fill(i++%256,30);sphere(sin(i*.0014)*s);}// #p5

//--0015

int i,s=900;void setup(){size(s,s);frameRate(1e4);stroke(255,25);}void draw(){fill(i++%89,0,0,127);rect(i%90*9,i%91*9,i*i%92,i*i%93);}// #p5

//--0016

int i,s=900,t=1200;void setup(){size(t,s);noStroke();}void draw(){fill(i++%256,25);quad(i%t,i/3%s,i/4%t,i%s,i/5%t,i/4%s,i/3%t,i/2%s);}// #p5

//--0017

int t=0;void setup(){size(900,900);background(0);stroke(255,9);}void draw(){translate(450,450);line(sin(t)*421,cos(t++)*400,t%9,t%9);}// #p5

//--0018

int s=900;size(1600,s);fill(255,9);while(s>9){rotate(1e-3);arc(s+420,s,s,s,0,7);arc(1000-s,s+100,s,s,0,7);arc(s+500,400-s,s,s--,0,4);}// #p5

//--0019

int i,j,s=900;void setup(){size(s,s,P3D);smooth(8);}void draw(){stroke(i);line(i,j,s-j,i);if(j%5<1){i=(i+1)%s;}if(i%11<1){j=(j+i)%s;}}// #p5

//--0020

int s=900;void setup(){size(1200,s,P3D);}void draw(){fill(s,50);translate(sin(s)*110+600,cos(s)*99+450);rotate(s);box(s);s=(s+1)%900;}// #p5


cheap 4-channel videoplayer

for the dance piece ich(a) by zufit simon i constructed a system with four raspberry pi mini-computers and buttons to trigger playback of four video streams. as the videos didn't need to run in exact frame-by-frame sync, this was a very cheap way to get four channels of high-quality video playback. total cost was about (rpi 28*4)+(sdcard 6*4)+(5v power 1*7) ≈ 141 euro. i chose the model A of the raspberry pi to keep the cost and power consumption down. the four computers share a 5v power supply of 2 amps and are powered over the gpio pins. video cables run 50 meters down to the stage and into separate flat-screen monitors. the monitors are built into boxes that can be stacked or rolled around independently.

the videos are stored on the 4GB sd cards that also hold the linux operating system. i converted the videos from dvd to mp4 using ffmpeg with the following settings...

ffmpeg -i concat:"/Volumes/MONITOR01_may2012_DVD/VIDEO_TS/VTS_01_1.VOB|/Volumes/MONITOR01_may2012_DVD/VIDEO_TS/VTS_01_2.VOB" -an -vcodec libx264 -profile:v high -preset fast -crf 18 -b-pyramid none -f mp4 MONITOR01_may2012.mp4

that'll concatenate the two chapters into a single mp4 and skip the sound track (the -an flag).

the python program running on each computer is shown below. it plays a video to the end and then waits for a button trigger. if the button is pressed before the video has finished, it stops and jumps to the next video - all in a cyclic fashion.

#f0videoplayer.py
#for a raspberry pi running raspbian
#this script will cycle through videos in sequence when a GPIO pin is grounded

#pinSwi (pulled up internally) - gnd this pin to switch to the next video
#pinOff (pulled up internally) - gnd this to shut down the system

#--settings
videos= ['/home/pi/ICHA1.mp4', '/home/pi/MONITOR01_may2012.mp4', '/home/pi/BLACK.mp4', '/home/pi/FLESH.mp4', '/home/pi/TESTBILDER.mp4']
delays= [0, 0, 0, 0, 0] #extra start delay time in seconds - one value for each video
pinSwi= 23
pinOff= 24

#--
import pexpect
from time import sleep
import RPi.GPIO as GPIO
import os
GPIO.setmode(GPIO.BCM)
GPIO.setup(pinSwi, GPIO.IN, pull_up_down= GPIO.PUD_UP)
GPIO.setup(pinOff, GPIO.IN, pull_up_down= GPIO.PUD_UP)

def main():
        os.system("clear && tput civis")        #clear and hide cursor
        index= 0        #keeps track of which video to play
        while True:
                sleep(delays[index])
                omx= pexpect.spawn('/usr/bin/omxplayer -rp '+videos[index])
                omx.expect('Video')     #play
                while(GPIO.input(pinSwi)==True):
                        sleep(0.1)
                        if GPIO.input(pinOff)==False:
                                omx.send('q')   #quit
                                os.system("tput cnorm && sudo halt")
                                exit()
                omx.send('q')   #quit
                sleep(0.5)              #safety
                while(GPIO.input(pinSwi)==False):
                        sleep(0.01)
                index= (index+1)%len(videos)

if __name__ == "__main__":
        main()

//----------------------------
//--instructions for installing (you'll need a model B to prepare an sd card, but can then move it over to the model A raspberry pi)

//--prepare the rpi
* use Pi Filler to transfer 2013-05-25-wheezy-raspbian.img to the sdcard
* put the sdcard in rpi model B
* select 'Expand Filesystem' and enable SSH under 'Advanced Options' in the config menu
* select 'Finish' and reboot
* log in with pi/raspberry
* sudo apt-get update
* sudo apt-get upgrade
* sudo apt-get install python-pexpect avahi-daemon

//--copy files from osx
* open a terminal window on main computer
* cd to folder with videos
* edit the file f0videoplayer.py and select which videos to use
* optionally add delaytimes if some videos should start later
* scp f0videoplayer.py MONITOR01_may2012.mp4 ICHA1.mp4 BLACK.mp4 FLESH.mp4 TESTBILDER.mp4 pi@raspberrypi.local:/home/pi/

//--back to model B
* sudo pico /etc/rc.local
* add the following before the exit line: (sleep 1; python /home/pi/f0videoplayer.py) & # autostart video player
* press ctrl+o to save and ctrl+x to exit
* sudo halt

//--start model A
* take out the sdcard from model B and put it in model A
* connect hdmi or composite video, gpio pins and apply power - the first video should start
* ground pin 23 to cycle through the videos
* ground pin 24 to turn off the computer

//----------------------------
//--useful commands (connect keyboard to rpi model A, type pi/raspberry to log in)
sudo pkill omxplayer.bin     #might need to write this without the terminal being visible

if you get "WARNING: REMOTE HOST IDENTIFICATION HAS CHANGED!"
ssh-keygen -R raspberrypi.local     #useful for resetting ssh/scp after changing sd cards

it's not pretty but it's working. some day i'll build it into a real rackmount box.

//--references:
www.raspberrypi-spy.co.uk/2013/06/playing-videos-on-the-raspberry-pi-command-line
www.raspberrypi.org/phpBB3/viewtopic.php?f=38&t=47520

ström

since the category 'visuals' is underrepresented in this blog and i don't like to embed video in my standard [html] pages, i thought i'd include this old piece here. this is a shorter, abridged version of the full piece. the quality isn't the best - it's pixelated and stuttering. one day i should re-render it at 60fps and a higher resolution. it looks a lot better when running in realtime from a computer.

strömSA4 from Mattias Petersson on Vimeo.

Ström by Mattias Petersson (music) and Fredrik Olofsson (video) is, in its full version, a 45-minute minimalistic piece for five loudspeakers, live-electronics and live-video, based on an open-minded, artistic approach towards electricity. The piece is an attempt to transfer electric currents via sound to the audience. The five speakers in the surround system struggle to take over the sonic stream like electro-magnets. Sine waves and noise rotate with breakneck speed around the listeners, try to charge them with static electricity and, as an ultimate goal, even make them levitate. The video part is in direct connection with the sound and is generated out of five discrete lines – one for each channel in the surround system. The lines are treated in different ways, and as the high voltage builds up in the music they look more and more like electric wires, inflicting violent discharges and eruptions on each other. This version was made for a promotional DVD release on Swedish sound art.

also see here

pact - february

another 30 days of daily experiments, this time with cairo 2d and cinder for the graphics, plus supercollider for sound. one very simple sketch per day, to learn.

 

//--pakt29
osx binary
c++ code
processing

Ndef(\wrap).play
(
Ndef(\wrap, {var n= 29; Splay.ar({|i|
        var w= i+LFSaw.ar(i+1*SinOsc.ar(0.1, i/n*2pi, 1, 0.1), 0, 2, 2);
        Saw.ar(340+LFSaw.ar(0.1, i/n, 10, 10).round(20)+i)%Saw.ar(60+w, SinOsc.ar(0.01, i/n*2pi, 2, 2.05))*0.25;
}!n)});
)
Ndef(\wrap).stop


 

//--pakt28
osx binary
c++ code
processing

Ndef(\round).play
(
Ndef(\round, {var n= 28; GVerb.ar(Limiter.ar(LeakDC.ar(Mix({|i|
        var z= SinOsc.ar(i.linexp(0, n-1, 70, 1500), LFSaw.ar(i+1*5, 0, 0.5pi), LFSaw.ar(0.2+SinOsc.ar(i+1*0.001, 0, 0.5), i/n, 0.4).max(0))*SinOsc.ar(200+i, 0, SinOsc.ar(0.03, i+1, 0.5, 1))*SinOsc.ar(400+i, 0, SinOsc.ar(0.04, i+2, 0.5, 1))*SinOsc.ar(800+i, 0, SinOsc.ar(0.05, i+3, 0.5, 1));
        Pan2.ar(z, i.linlin(0, n-1, -0.925, 0.925), 1/n);
}!n))), 3, 5, 0.2, 0.8, 20, 0.1)});
)
Ndef(\round).stop


 

//--pakt27
osx binary
c++ code
processing

Ndef(\sweep).play
(
Ndef(\sweep, {var n= 9; GVerb.ar(Mix({|i|
        var t= i/n;
        var t2pi= t*2pi;
        var f= 2**i*10+100;
        var z= LeakDC.ar(VarSaw.ar(SinOsc.ar(i+1*0.005, SinOsc.ar(i+1*SinOsc.ar(0.05, t2pi, 0.2, 0.4), 0, 2pi), f*SinOsc.ar(0.002, t2pi, 0.3, 0.5), f), t, SinOsc.ar(i+1*0.006, t2pi, 0.4, 0.5), i.linlin(0, n-1, 0.4, 0.1)));
        Pan2.ar(z, SinOsc.ar(0.015, t2pi, i.linlin(0, n-1, 0.99, 0.2)), 1/n);
}!n), 30, 5, 0.3, 0.8, 10)});
)
Ndef(\sweep).stop


 

//--pakt26
osx binary
c++ code
processing

Ndef(\grid).play
(
Ndef(\grid, {var n= 8; Mix({|i|
        var t= i/n;
        var z= LeakDC.ar(VarSaw.ar(SinOsc.ar(VarSaw.ar(i+1*VarSaw.ar(0.048, 0, 0.5, 25, 150), t, 1/3, 150), VarSaw.ar(100+i, t, VarSaw.ar(0.024, t, 0.25, 0.475, 0.5))*pi, VarSaw.ar(0.012, t, 0.75, VarSaw.ar(0.064, t, 0.5, 25, 50), 200), VarSaw.ar(0.16, t, 2/3, VarSaw.ar(0.02, t, 0.5, 7.5).abs, 300)), t));
        Pan2.ar(z, VarSaw.ar(0.02, t, 0.5), 1/n);
}!n)});
)
Ndef(\grid).stop


 

//--pakt25
osx binary
c++ code
processing

Ndef(\wobble).play
(
Ndef(\wobble, {var n= 5; Mix({|i|
        var z= SinOsc.ar(0, SinOsc.ar(60+(i*SinOsc.ar(0.004, 0, 0.8, 1)), i/n*2pi, SinOsc.ar(0, SinOsc.ar(1, 0, 2pi), SinOsc.ar(0.006), 2pi)), 1/n);
        Pan2.ar(z, z);
}!n)});
)
Ndef(\wobble).stop


 

//--pakt24
osx binary
c++ code
processing

Ndef(\swoop).play
(
Ndef(\swoop, {var n= 24; Mix({|i|
        var t= i/n;
        var f= SinOsc.ar(0.01, t*0.5pi).exprange(i*22+100, i*44+1000);
        var a= SinOsc.ar(0.05*t, t*2pi, 0.15).max(0);
        var z= RLPF.ar(GrayNoise.ar(a), f*(a+1), 1.4-a-t);
        Pan2.ar(z, LFTri.ar(0.05, t*4, 0.95));
}!n)});
)
Ndef(\swoop).stop


 

//--pakt23
osx binary
c++ code
processing

Ndef(\sway).play
(
Ndef(\sway, {var n= 23; LeakDC.ar(Splay.ar({|i|
        var t= i/n;
        var f= t.linexp(0, 1, LFTri.ar(0.04, t*4, 400, 600), LFTri.ar(0.03, t*4, 400, 600));
        var a= SinOsc.ar(SinOsc.ar(0.02, t*pi, 0.5), 0, 0.5).max(0);
        SinOsc.ar(f+SinOsc.ar(f, 0, f*a), LFTri.ar(SinOsc.ar(t+0.01, t, 20), t*4, 4pi), a)
       
}!n))});
)
Ndef(\sway).stop


 

//--pakt22
osx binary
c++ code
processing

Ndef(\blob).play
(
Ndef(\blob, {var n= 33; Mix({|i|
        var t= i/n*2pi;
        Pan2.ar(
                LeakDC.ar(SinOsc.ar(Pulse.ar(0.0625, 2/3).range(0.0625, 0.125), SinOsc.ar(Pulse.ar(i+1, Pulse.ar(i+1*0.125, 0.4, 1/3, 0.5), i+1*n, i+1*n*2), t, SinOsc.ar(SinOsc.ar(0.0625, t), t, 2pi)), SinOsc.ar(i+1*0.125, t, i.linexp(0, n-1, 3/n, 0.001)))),
                SinOsc.ar(0.125, t, i.linlin(0, n-1, 0, 0.95))
        )
}!n)});
)
Ndef(\blob).stop


 

//--pakt21
osx binary
c++ code
processing

Ndef(\flush).play
(
Ndef(\flush, {var a= {|i| 2.pow(i)}!7; Mix(
        Pan2.ar(
                Resonz.ar(SinOsc.ar(0, GrayNoise.ar(a*30*pi), 0.5), a*300, SinOsc.ar(a*0.13, 0, 0.4, 0.5)),
                SinOsc.ar(a*0.03, 0, 0.95)
        )
)});
)
Ndef(\flush).stop


 

//--pakt20
osx binary
c++ code
processing

Ndef(\veil).play
(
Ndef(\veil, {var n= 15; GVerb.ar(Mix({|i|
        var t= i/n*2pi;
        var f= SinOsc.ar(SinOsc.ar(i*0.015+0.015, t), t);
        var a= SinOsc.ar(f*SinOsc.ar(i*0.15+0.15, t, 0.15, 0.3), t, 150, 300);
        var b= SinOsc.ar(f*SinOsc.ar(i*0.15+0.15, t, 0.15, 0.5), t, 1500, 3000);
        Pan2.ar(
                BPF.ar(
                        Saw.ar(SinOsc.ar(f, t).exprange(a, b), SinOsc.ar(f, t, SinOsc.ar(0.015*t+0.15, t, 0.15).max(0))),
                        a+b*0.5,
                        0.15
                ),
                i/(n-1)*2-1
        );
}!n), 15, 1.5, 0.15)});
)
Ndef(\veil).stop


 

//--pakt19
osx binary
c++ code
processing

Ndef(\bubbles).play
(
Ndef(\bubbles, {var n= 6;
        GVerb.ar(Mix({|i|
                var q= i/n*2pi;
                var t= Impulse.ar(SinOsc.ar(SinOsc.ar(0.125, q, 0.5), 0, 1.5, 2), i/n);
                Pan2.ar(
                        SinOsc.ar(
                                SinOsc.ar(0.01, q, SinOsc.ar(0.05, q, 50), 500),
                                Decay2.ar(t, 0.02, 0.2, SinOsc.ar(0.05, q, SinOsc.ar(0.01, 0, 8pi, 8pi))),
                                Decay2.ar(t, TRand.ar(0.003, 0.03, t), TRand.ar(0.05, 0.1, t), SinOsc.ar(0.08, q, 0.2, 0.05).max(0))
                        ),
                        (i/(n-1))*2-1
                );
        }!n), 40, 2, 0.6);
});
)
Ndef(\bubbles).stop


 

//--pakt18
osx binary
c++ code
processing

Ndef(\grey).play
(
Ndef(\grey, {Mix({|i|
        var t= Impulse.ar(SinOsc.ar(0.1+(i*0.125), i/10*2pi+#[0, 0.1], 4, SinOsc.ar(0.01+(i*0.01), 0, 4, 12)));
        Pan2.ar(
                FreeVerb.ar(
                        BPF.ar(
                                GrayNoise.ar(
                                        Decay2.ar(
                                                t,
                                                TRand.ar(0.005, 0.015, t),
                                                TRand.ar(0.1, 0.15, t),
                                                TRand.ar(0.35, 0.5, t)
                                        )
                                ),
                                SinOsc.ar(i+1*0.032, #[0, 0.1]+i).exprange(i+1*300, i+1*600),
                                SinOsc.ar(i+1*0.025, #[0, 0.1]+i).range(0.1, 1)
                        ),
                        0.3
                ),
                SinOsc.ar(0.1, #[0, 0.1]+i, 0.9)
        );
}!10)});
)
Ndef(\grey).stop


 

//--pakt17
osx binary
c++ code
processing

Ndef(\shades).play
(
Ndef(\shades, {Mix({|i|
        var x= SinOsc.ar(0, SinOsc.ar(0.01*i+0.03, i, 2pi));
        var y= SelectX.ar(x.range(0, 4), [WhiteNoise.ar, GrayNoise.ar, PinkNoise.ar, BrownNoise.ar]);
        var z= Pan2.ar(y, x*0.4);
        BPF.ar(Rotate2.ar(z[0], z[1], i/3*2-1), i+1*500, SinOsc.kr(0, SinOsc.ar(i*0.02+0.01, i, pi)).range(1, 10), 0.3);
}!4)});
)
Ndef(\shades).stop


 

//--pakt16
osx binary
c++ code
processing

Ndef(\wheel).play
(
Ndef(\wheel, {Splay.ar(BPF.ar(PinkNoise.ar(1!3)*SinOsc.ar(VarSaw.ar(#[0.011, 0.012, 0.013], #[0, 0.1, 0.2], 0.5, VarSaw.ar(#[0.01, 0.02, 0.03], #[0, 0.1, 0.2]).exprange(5, 50), #[300, 303, 309]), CombN.ar(Saw.ar(#[3, 2.5, 1], 0.5pi).sum, 0.05, 0.05), 3), VarSaw.ar(#[0.021, 0.022, 0.023], #[0.2, 0.1, 0.3]).exprange(500, 2000), VarSaw.ar(#[0.031, 0.032, 0.033], #[0, 0.1, 0.2]).exprange(0.06, 0.6)))});
)
Ndef(\wheel).stop


 

//--pakt15
osx binary
c++ code
processing

Ndef(\spin).play
(
Ndef(\spin, {GVerb.ar(Mix(Pan2.ar(Formlet.ar(LPF.ar(Saw.ar((5..1)*LFPulse.ar(SinOsc.ar(0.1, 0, 0.5, 1), 0.5, 0.5, 10, 50)+SinOsc.ar((6..2)*0.05).exprange(0.05, 50), 0.3), 300)+Impulse.ar((0..4)+SinOsc.ar((4..8)*0.02).exprange(0.3, 300)), (1..5)*SinOsc.ar((5..9)*0.05).exprange(200, 2000)*SinOsc.ar(SinOsc.ar((2..6)*0.1, 0, 0.1), 0, 0.1, 1), 0.001, 0.0015), SinOsc.ar(SinOsc.ar((3..7)*0.1, 0, 0.1)))))});
)
Ndef(\spin).stop


 

//--pakt14
osx binary
c++ code
processing

Ndef(\interfere).play
(
Ndef(\interfere, {var a= #[3, 1, 5, 2]; Limiter.ar(Splay.ar(Formlet.ar(LFPulse.ar(a*100+SinOsc.ar(a, 0, a/20), 0, SinOsc.ar(a/10, 0, 0.45, 0.5), LFPulse.ar(a+a, 0, SinOsc.ar(a/10, 0, 0.45, 0.5), 0.1)), a*100+LFPulse.ar(a/2, 0, 0.5, a*SinOsc.ar(a/100, 0, 150, 200)), SinOsc.ar(a/30, 0, 0.01, 0.0125), SinOsc.ar(a/60, 0, 0.05, 0.055), 0.2)))});
)
Ndef(\interfere).stop


 

//--pakt13
osx binary
c++ code
processing

Ndef(\pulse).play
(
Ndef(\pulse, {GVerb.ar(Splay.ar(Resonz.ar(LFPulse.ar(#[121, 232, 343]), LFPulse.ar(#[0.121, 0.232, 0.343]).exprange(LFPulse.ar(#[12.1, 23.2, 34.3]).range(80, 100), LFPulse.ar(#[1.21, 2.32, 3.43].reverse).range(800, 1000)).sum*LFPulse.ar(#[0.121, 0.232, 0.343]).range(0.5, 1), 0.3, 0.15)), 34, 3, 0.2)});
)
Ndef(\pulse).stop


 

//--pakt12
osx binary
c++ code
processing

Ndef(\waves).play
(
Ndef(\waves, {var n= 3; Mix({|i|
        var z= VarSaw.ar(i+1*0.01, 0, 0.5, 5, VarSaw.ar(i+1*10, 0, 0.5, 0.5, 10));
        var w= VarSaw.ar(i+1*VarSaw.ar(i+1*0.001, 0, 0.5, z, z*z), 0, 0.5, 0.5, 0.5);
        Pan2.ar(
                SinOsc.ar(0, VarSaw.ar(i+1*w*100, 0, w, 2pi), VarSaw.ar(i+1*w*0.1, 0, w, 0.5)),
                i.linlin(0, n-1, -0.9, 0.9),
                2/n
        )
}!n)});
)
Ndef(\waves).stop


 

//--pakt11
osx binary
c++ code
processing

Ndef(\hail).play
(
Ndef(\hail, {Splay.ar(Ringz.ar(SinOsc.ar(#[0.000101, 0.000202, 0.000303, 0.000404, 0.000505, 0.000606], SinOsc.ar(#[101, 202, 303, 404, 505, 606], 0, SinOsc.ar(#[0.0101, 0.0202, 0.0303, 0.0404, 0.0505, 0.0606], 0, pi)))*VarSaw.ar(#[1.01, 2.02, 3.03, 4.04, 5.05, 6.06], #[0.101, 0.202, 0.303, 0.404, 0.505, 0.606], SinOsc.ar(#[0.00101, 0.00202, 0.00303, 0.00404, 0.00505, 0.00606], 0, 0.5, 0.5)), #[1010, 2020, 3030, 4040, 5050, 6060], SinOsc.ar(#[10.1, 20.2, 30.3, 40.4, 50.5, 60.6], 0, 0.1, 0.2), 0.2))});
)
Ndef(\hail).stop


 

//--pakt10
osx binary
c++ code
processing

Ndef(\rain).play
(
Ndef(\rain, {
        var n= 30;
        Mix({|i|
                var z= SinOsc.ar(i+1*0.01, 0, 0.001);
                var f= i*100+100+SinOsc.ar(0.0123+z, i/n*2pi).exprange(1, 30);
                var q= SinOsc.ar(0.0234+z, i/n*2pi, 0.3, 0.7);
                var p= SinOsc.ar(0.0345+z, i/n*2pi);
                var a= SinOsc.ar(0.0456+z, i/n*2pi, 0.4, 0.45);
                var x= HPF.ar(BPF.ar(HPF.ar(ClipNoise.ar(2)*Crackle.ar(SinOsc.ar(0.0123+z, i/n*2pi, 0.1, 1.8))), f, q));
                Pan2.ar(x, p, a);
        }.dup(n));
});
)
Ndef(\rain).stop


 

//--pakt09
osx binary
c++ code
processing

Ndef(\snow).play
(
Ndef(\snow, {
        Mix({|i|
                var m= SinOsc.ar(0.005, i/5*2pi, 2pi);
                var p= WhiteNoise.ar(SinOsc.ar(i*5000+5000)*SinOsc.ar(i*500+500)*SinOsc.ar(i*50+50)*SinOsc.ar(i*5+5));
                Pan2.ar(SinOsc.ar(p, p*m, p), p, 0.5);
        }.dup(5));
});
)
Ndef(\snow).stop


 

//--pakt08
osx binary
c++ code
processing

Ndef(\redqueen3).play
(
Ndef(\redqueen3, {GVerb.ar(LeakDC.ar(
        Saw.ar(
                Saw.ar([100, 101]+Saw.ar([102, 103], Saw.ar([4, 5], Saw.ar([1, 2], 6, 7).sum).sum).sum).exprange(Saw.ar(1/12, 4, 50), Saw.ar(1/8, 3, Saw.ar(1/16, 4, 65))),
                Saw.ar([21, 20], Saw.ar([40, 41], 0, 0.1), 0.2)
)), 60, 4, 0.5, 0.5, 5, 0.5, 0.5, 0.75)});
)
Ndef(\redqueen3).stop


 

//--pakt07
osx binary
c++ code
processing

Ndef(\redqueen2).play
(
Ndef(\redqueen2, {GVerb.ar(LeakDC.ar(
        SinOsc.ar(
                SinOsc.ar(
                        SinOsc.ar(
                                SinOsc.ar(
                                        SinOsc.ar(
                                                SinOsc.ar(
                                                        SinOsc.ar(
                                                                1,
                                                                0,
                                                                2,
                                                                SinOsc.ar(1/2).exprange(1, 2)
                                                        ),
                                                        0,
                                                        8,
                                                        SinOsc.ar(1/4).exprange(4, 8)
                                                ),
                                                0,
                                                32,
                                                SinOsc.ar(1/8).exprange(16, 32)
                                        ),
                                        0,
                                        128,
                                        SinOsc.ar(1/16).exprange(64, 128)
                                ),
                                0,
                                512,
                                SinOsc.ar(1/32).exprange(256, 512)
                        ),
                        0,
                        2048,
                        SinOsc.ar(1/64).exprange(1024, 2048)
        ), 0, 0.1)
), 16, 8, 0.75, 0.5)});
)
Ndef(\redqueen2).stop


 

//--pakt06
osx binary
c++ code
processing

Ndef(\redqueen).play
(
Ndef(\redqueen, {GVerb.ar(LeakDC.ar(SinOsc.ar(SinOsc.ar([1/16, 1/12], 0, 5), SinOsc.ar(0, SinOsc.ar([SinOsc.ar(3, 0, 5, 12), SinOsc.ar(4, 0, 4, 16)], SinOsc.ar([SinOsc.ar(1/64, SinOsc.ar(0.5, 0, pi)).exprange(1, 30), SinOsc.ar(1/48, SinOsc.ar(0.75, 0, pi)).exprange(1, 30)], SinOsc.ar(SinOsc.ar(1/32, 0, 4), 0, 2pi), SinOsc.ar([1/6, 1/8], 0, 0.5pi, 2pi)), SinOsc.ar([1/3, 2/3], 0, 0.5pi, SinOsc.ar(1/8, 0, 0.5pi, 2pi))), SinOsc.ar([4/3, 3/4], 0, 0.5pi, SinOsc.ar([SinOsc.ar(1/256).exprange(80, 800), SinOsc.ar(1/256).exprange(80.8, 808)], 0, 0.5pi, 2pi)))))*0.05, 10, 3, 0.5, 0.5)});
)
Ndef(\redqueen).stop


 

//--pakt05
osx binary
c++ code
processing

Ndef(\noises).play
(
Ndef(\noises, {
        var freq= SinOsc.ar(SinOsc.ar((4..0)/150+SinOsc.ar((0..4)/18, 0, 0.8)), SinOsc.ar((0..4)/80+SinOsc.ar((0..4)/20, 0, 0.1), 0, 2pi)).exprange(100, 1000);
        var rq= SinOsc.ar(SinOsc.ar((0..4)/6+SinOsc.ar((0..4)/19, 0, 0.7), SinOsc.ar((4..0)/5+SinOsc.ar((4..0)/2, 0, 0.1), 0, 2pi))).exprange(0.4, 4);
        Splay.ar(BPF.ar(BPF.ar(ClipNoise.ar(1!5), freq, rq), freq, rq), 0.85);
});
)
Ndef(\noises).stop


 

//--pakt04
osx binary
c++ code
processing

Ndef(\lines).play
(
Ndef(\lines, {Splay.ar(LeakDC.ar(SinOsc.ar([0.033, 0.066, 0.055, 0.044], SinOsc.ar([0.12, 0.13, 0.11, 0.14]*SinOsc.ar([0.151, 0.152, 0.153, 0.154], SinOsc.ar([5, 4, 3, 2], 0, 2pi), SinOsc.ar([0.043, 0.053, 0.063, 0.073], 0, [80, 60, 40, 100])), SinOsc.ar(([60, 64, 67, 71]+SinOsc.ar([0.024, 0.025, 0.026, 0.027], SinOsc.ar([0.01, 0.02, 0.03, 0.04], 0, pi), 1).round).midicps, 0, 2pi)), 0.2)))})
)
Ndef(\lines).stop


 

//--pakt03
osx binary
c++ code
processing

Ndef(\varsaws).play
(
Ndef(\varsaws, {GVerb.ar(CombC.ar(VarSaw.ar(SinOsc.ar([0.1, 0.11], 0, 5, 100+SinOsc.ar([0.05, 0.055], 0, 50, 50).round(50)), 0, SinOsc.ar([0.2, 0.22], 0, 0.5, SinOsc.ar([0.3, 0.33], 0, 0.1, 0.5)), 0.1), 1.01, SinOsc.ar([0.4, 0.44], 0, 0.01, 1), 8), 80, 5, 0.9)})
)
Ndef(\varsaws).stop


 

//--pakt02
osx binary
c++ code
processing

Ndef(\saws).play
(
Ndef(\saws, {Splay.ar(BPF.ar(LeakDC.ar(Saw.ar(SinOsc.ar((0..5)+1*0.02, SinOsc.ar((0..5)+1*101+300, 0, 2pi+SinOsc.ar(0.01, 0, 0.5*pi)), 400, 700))), SinOsc.ar((0..5)+1*0.004, 0, 100, 400), SinOsc.ar((0..5)+1*0.006, 0, 0.4, 0.8)))})
)
Ndef(\saws).stop


 

//--pakt01
osx binary
c++ code
processing

Ndef(\moreSines).play
(
Ndef(\moreSines, {LeakDC.ar(Splay.ar(SinOsc.ar((0..20)/70+0.01, SinOsc.ar((0..20)+1*50+50+SinOsc.ar((0..20)+1/30), 0, 2pi), SinOsc.ar((0..20)+1/80, (0..20)/40, 0.2).max(0))))})
)
Ndef(\moreSines).stop


 

//--pakt00
osx binary
c++ code
processing

Ndef(\sines).play
(
Ndef(\sines, {GVerb.ar(Splay.ar(SinOsc.ar([100, 200, 300]+SinOsc.ar([0.11, 0.22, 0.33]), SinOsc.ar([0.1, 0.2, 0.3], 0, 2pi), 0.1+SinOsc.ar([0.01, 0.02, 0.03], 0, 0.05)), SinOsc.ar(SinOsc.ar(SinOsc.ar(0.13, 0, 5, 6), 0, 8, 50), 0, 1, 1), 0.7, SinOsc.ar(1.2, 0, 0.6)), 20, 5, 1, 0.5, 25, 0, 1, 1)})
)
Ndef(\sines).stop


update 120212: i ported them all to processing. see here.
update 120617: rendered 1min mp3 excerpts. attached below under the code snippets.

Audiovisuals with SC

Fredrik Olofsson

In this article we will investigate the built-in graphical features of SuperCollider and how they can be used artistically, in combination with the sound synthesis server. Different techniques for audiovisual mapping are presented along with some more personal reflections on the relationship between sound and graphics. My hope is that this text, together with the code examples, will provide inspiration for composers to work simultaneously in both the aural and the visual domain.

1 Introduction

It is clear that presenting any kind of visual stimuli in relation to music severely affects how we perceive the music. Often our experience is altered in more radical ways than we would like to acknowledge: "...one perception influences the other and transforms it. We never see the same thing when we also hear; we don’t hear the same thing when we see, as well" (Chion 1994, xxvi). Our goal as audiovisual composers should be to wisely utilize and direct these transformations. They become our material (Chion 1994; Collins and Olofsson 2006; Alexander and Collins 2007).
    Successful audiovisual pieces have the audience believe that sound and graphics truly exist in the same context. This is what Michel Chion calls the audiovisual contract (Chion 1994). How can we establish these contracts in our own work?
    As we will be coding both music and visuals, the most obvious way would be to share parameters between the sound and the graphics. So we do not just match sounds with visual events in time, but rather take the parameters that generate the graphics and let them also control aspects of the sound—or vice versa.
    Overusing these direct mappings is easily done and we risk tiring our audience with too obvious a correlation over too long a time. This is what in film music is called Mickey Mousing; it happens when all visual events have sounds or musical phrases connected to them. A ball bounces: boing boing; a duck climbs a ladder: upward scale; and so on. As this becomes much more of a problem for longer pieces, we will need to find ways to vary our mappings over time. It is often enough to convincingly establish the correlation at the very beginning of the piece. Your audience’s trust in the relation will last for quite some time. So not all visual events need to have sounding representations, but the ones that do should be strong and trustworthy. And the same is true for sounding events in relation to visuals.
    For multimedia pieces, there is also the problem of balance of attention. When we are dealing with such a perceptually powerful medium as visuals, we run the risk of having them overpower the music. We hear but simply forget to listen. How can we as audiovisual composers merge these two disparate domains rather than let one belittle the other?
    In genres like film music, by comparison, a soundtrack that goes by unnoticed is often seen as a positive thing and something to strive for. Can we avoid composing subtle soundtracks for emotional manipulation, or providing sounds just to fill a void in the narrative? That is all art in its own right, but here we will prefer to focus on the well-balanced audiovisual work.
    Given the normal predominance of the visual element, I find there is often a need to keep the graphics minimal and even slightly boring. Simplicity, regularity and consistency are all strategies that will help keep our minds from being distracted in the act of active listening. And we are very easily distracted. Just as loud unexpected sounds can be frightening, a sudden movement, an unexpected color or shape will most likely grab our attention. These foreign objects will kick-start our minds to seek explanations for why a particular thing pops up there at that time.
    But this also means that our audience will actively come up with explanations and build narratives for things that were never meant to relate. They will want to see connections all the time by constantly looking for reasons—causes of the effects. We should strive to utilize this urge, feed it at the right times, play with and deceive it. We want to create the illusion of developments in the music being the logical cause of visual events.
    So just as we are careful not to add sudden loud sounds in the mixing of a music track, I believe one should tread as carefully when presenting new shapes, colors, kinds of movements etc. Creating visuals could be seen analogously to composing music, where new sounds and themes are usually hinted at and prepared for in advance. Then these elements are developed, picked apart, recombined and used throughout the piece. Form, harmony, processes, theme and variations should be just as important concepts when composing with graphics.
    But let us also not forget that contracts are made to be broken and that the visuals could and should provoke, jump out at you, be fun, annoying, wild and inspiring. Let them at times deliberately counteract the music. The effect of bringing it all back together will be all the stronger.
    I hope the text here does not suggest a form of add-on graphics that just sit there and look beautiful. They can be so much more than just candy for the eyes. For me, audiovisuals are about the interplay of graphics and sound, and harmony is not always the most exciting option.

2 Graphics in SuperCollider

With the Pen class, SuperCollider provides simple two-dimensional graphics. Pen can only draw a few primitive shapes like lines, arcs, rectangles and ovals. These basic shapes can be stroked (outlined) or filled in different colors. As an example, the following code shows how to draw a red rectangle with a blue unfilled oval inside of it. The Rect class is required to specify coordinates and size for these objects.

(
Window().front.drawFunc= {
        Pen.fillColor= Color.red;               //set fill color
        Pen.fillRect(Rect(10, 20, 200, 100));   //10 pixels from left, 20 from top
        Pen.strokeColor= Color.blue;            //set stroke color
        Pen.strokeOval(Rect(20, 30, 180, 80));  //180 pixels wide, 80 high
};
)

The moveTo and lineTo methods help us draw custom shapes. Here the Point class is needed to specify line segments. This excerpt will draw a fat yellow triangle:

(
Window().front.drawFunc= {
        Pen.width= 8;           //set pencil width in pixels
        Pen.strokeColor= Color.yellow;  //set stroke color
        Pen.moveTo(Point(100, 100));    //go to start position
        Pen.lineTo(Point(150, 50));
        Pen.lineTo(Point(200, 100));
        Pen.lineTo(Point(100, 100));
        Pen.stroke;     //perform all collected drawing commands in one go
};
)

Apart from drawing, Pen also lets you scale, translate (offset) and rotate the drawing area—but all in two dimensions only:

(
Window().front.drawFunc= {
        Pen.scale(0.5, 0.5);            //scale to half the size
        Pen.rotate(pi/4, 640/2, 480/2); //rotate 45 degrees in a 640 by 480 window
        Pen.translate(100, 200);        //offset drawing 100 pixels from left, 200 from top
        Pen.fillRect(Rect(0, 0, 100, 100));
};
)

These transformations affect the drawing commands that follow and will help to position and animate your shapes. With the use method we can define a scope for transformations as well as color settings:

(
Window().front.drawFunc= {
        Pen.strokeColor= Color.red;
        Pen.use{        //remember state (push)
                5.do{|i|
                        Pen.strokeColor= Color.grey(i/5);
                        Pen.scale(0.75, 0.9);                   //scale width and height
                        Pen.strokeOval(Rect(20, 30, 180, 80));  //results in smaller ovals
                };
        };      //revert back to state (pop)
        Pen.strokeOval(Rect(20, 30, 180, 80));  //big oval (note same size)
};
)

These are the basic features of the Pen class and they, like all drawing commands, must be performed within a window’s or user view’s redrawing routine (that is, within a drawFunc function).
    A class with so few features can of course be quite limiting and frustrating to work with. For instance you will have to combine several primitives to draw more complex shapes. But on the other hand, in combination with the outstanding flexibility of programming sounds in SuperCollider, Pen provides a unique improvisational way to explore and play with audiovisual mappings. Also, as it is so simple, it will force you to focus on the basic principles of shape, gesture and color.
    Moreover, Pen’s restraints can have a positive effect on the outcome. You more or less have to do simple, minimal and straightforward graphics and program everything yourself. Your ideas will be shown in crystal clarity to your audience—for better or for worse, as there are no fancy and fluffy video effects to hide them behind.

3 Structure of the examples

It is recommended that you study the Pen, Color, Window and UserView help files alongside this text. The better knowledge you have of these classes, the easier it will be to modify and adapt the code provided to suit your needs.
    All the examples referred to in this article use the same structure. First we create a window and place a user view inside of it.

s.latency= 0.05;
s.waitForBoot{
       
        //--window setup
        var width= 500, height= 500;
        var w= Window("Example00 - structure", Rect(99, 99, width, height), false);
        var u= UserView(w, Rect(0, 0, width, height));

The reason we use a user view here, instead of drawing directly into the window with a drawFunc function as shown in various help files, is that user views provide a few additional features that we will need later. Most importantly, a user view lets us control when and how to clear the drawing area. A window’s drawFunc will always erase previous drawings when the window is refreshed, and sometimes you would rather keep drawing new things on top of the current graphics, or draw while slowly fading out what was previously there. A user view can do this.
    After creating the window and user view, some more variables are defined. These will vary from example to example depending on what we will need to keep track of in the main loop below. Here we will set up things like counters, synths and responders.

        //--variables
        var theta= 0;   //will be used as a counter. no external access at runtime
        var syn= SynthDef(\av, {|freq= 400, amp= 0, pan= 0|
                var z= SinOsc.ar(0, BPF.ar(Pulse.ar(freq, amp)*2pi), amp);
                Out.ar(0, Pan2.ar(z, pan));
        }, #[0.05, 0.05, 0.05]).play(s);
        s.sync;

We then have some more settings in the form of environment variables. These will define things that we want to be able to change while the program is running. They will be our interface for the program and in most of the examples we will change these settings manually via the interpreter. But we could just as well control them with the help of the mouse, MIDI or OSC responders. Some later examples will show how to do that.

        //--interface
        ~speed= 0.025;          //it is possible to change these at runtime
        ~radius= 20;
        ~spreadx= 20;
        ~spready= 20;

Next in this general example structure comes the main loop. This function gets evaluated once each time the window is refreshed, and it is here that all of the actual drawing will take place.

        //--main loop
        u.drawFunc= {
                var x= sin(theta)*~spreadx;     //calculate coordinates
                var y= cos(x)*~spready;
                var a= x.hypot(y)/1.42/~spreadx;
                syn.set(                //update the synth with mapped parameters
                        \freq, y.linexp(height.neg*0.5, height*0.5, 100, 1000),
                        \amp, a.min(0.995),
                        \pan, x.linlin(width.neg*0.5, width*0.5, -1, 1)
                );
                Pen.translate(width*0.5, height*0.5);   //offset all drawing to the middle
                Pen.fillColor= Color.red;       //set the fill color
                Pen.fillOval(Rect.aboutPoint(Point(x, y), ~radius*a, ~radius*a));
                theta= theta+~speed%2pi;        //our counter counts in radians
        };

In this case we calculate positions and then use them to control a synth. Vertical window position is mapped to frequency (note the linear to exponential scaling), distance from window centre sets the amplitude and horizontal window position the panning.
    Finally, there are lines of code that set the user view’s clear behavior, give the window a background color and make the window visible. There is also a line that activates animation. This forces the user view to redraw itself around 60 times a second. Without animation the drawFunc function would only be evaluated once.

        //--window management
        u.clearOnRefresh= true;                 //erase view for each refresh
        u.background= Color.white;              //set background color
        w.onClose= {syn.free};                  //stop the sound when window closed
        w.front;                                //make the window appear
        u.animate= true;
        CmdPeriod.doOnce({if(w.isClosed.not, {w.close})});
};

So this will be the framework we work within. It should be pretty straightforward to follow and I believe it is flexible and general enough to serve you, dear reader, as a springboard for your own audiovisual experiments. Later in this article we will add features that will make the examples look more complex, but this basic structure will remain the same.

4 One-to-one mappings

Direct cross domain mappings are a fun and creative way to generate and control sounds and visuals. Before we look at more specialized examples, let us start with some simple tests and try to investigate, very roughly, which techniques could be used to attain strong audiovisual correlation. There is some subjectivity involved, and I do not want to make any binding claims or lists of rules to follow.

Example01a - louder is bigger
In this first example we map sound amplitude to object size. The code follows the structure outlined above and the only thing special would be the line var pat= Pn(Pshuf(#[0, 0, 0, 0, 0.1, 0.25, 0.5, 0.75, 1, 1], 8), inf).asStream; This creates an endless stream of amplitudes in the form of a pattern that reshuffles an array after eight repetitions. This is just to get some variation while still keeping to a fairly repetitive rhythm. Repetition will help us see the effect of our mapping technique more clearly.
    With the program still running, try changing the environment variables at the bottom. Different settings let us explore the effect of the mapping in different situations. This one-to-one mapping of loudness and size is a very strong one. It is hard to imagine a more direct example.
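
As an illustration, here is a stripped-down, graphics-only sketch of the louder-is-bigger idea. It is not the actual Example01a code (which also plays a synth and exposes environment variables); here the pattern is simply stepped every 30 frames, roughly twice a second at the default refresh rate.

(
var width= 300, height= 300;
var w= Window("louder is bigger (sketch)", Rect(99, 99, width, height), false);
var u= UserView(w, Rect(0, 0, width, height));
var pat= Pn(Pshuf(#[0, 0, 0, 0, 0.1, 0.25, 0.5, 0.75, 1, 1], 8), inf).asStream;
var amp= 0, count= 0;
u.drawFunc= {
        if(count%30==0, {amp= pat.next});       //step the amplitude pattern every 30 frames
        Pen.translate(width*0.5, height*0.5);
        Pen.fillColor= Color.red;
        Pen.fillOval(Rect.aboutPoint(Point(0, 0), amp*100, amp*100));   //louder means a bigger radius
        count= count+1;
};
u.clearOnRefresh= true;
u.background= Color.white;
u.animate= true;
w.front;
)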

Example01b - louder is smaller
In this example we use exactly the same code as in Example01a, but invert the relation of amplitude and size. So the louder the volume, the smaller the object. The only line added is the amp= 1-amp; which swaps this relation around and inverts the amplitude value just before drawing the oval.
    Of course there is a strong correlation between graphics and sound here as well, but it can feel stranger to watch. With a greater radius and a slower frame rate (~fps), you will probably notice it even more. This way of mapping, although as direct and consistent as the previous version, does not feel nearly as 'natural'. Why is that? We are so accustomed to the bigger-is-louder representation from the real world that it is hard to appreciate this backwards mapping in its own right.

Example02a - higher is bigger
Next we try a new technique. We map the frequency of the sound to the size of the object in such a way that higher pitches will draw bigger ovals. The code is almost identical to the previous examples except for some minor alterations to the synth definition. Now it lets us set the frequency with the scaleFreq argument. I find this audiovisual mapping is also very strong and direct.

Example02b - higher is smaller
After that we again invert the mapping of the previous example. One line differs and now lower pitches are drawn bigger. Interestingly enough, one could think that if physical laws were governing how 'natural' a mapping would be to us, then bigger objects would be more likely to sound with a lower pitch. It might be due to the construction of this particular example, but to me it does not map across domains as well as Example02a.

Example03a - louder is brighter
Example03b - louder is darker
These two examples demonstrate the effect of connecting brightness to amplitude. We use a gray color for the window background to try to be a bit more neutral. The mappings used in these two examples are perhaps not so direct and obvious, but personally I find that louder-is-brighter feels better than when louder means a darker color. Do not forget to test the different settings with the environment variables at the bottom.

Example04a - higher is higher
Example04b - higher is lower
Example05a - left is left
Example05b - left is right
Here are two pairs of examples where we use position on the screen as a parameter. These are of course very useful parameters to play with, and although we probably agree on them as basic and 'natural' principles to follow, they can also easily be ruled out or temporarily lose meaning. Projecting on the floor or on the ceiling, for instance, will obviously invalidate the otherwise strong up-equals-higher-frequency assumption. Perhaps it will work just as well with higher-frequency-equals-further-away, but often one has to invent the logic to accompany the display situation.

Example06a - louder is higher
Example06b - louder is lower
Another pair of examples of the same kind as 04 and 05. We are maybe slightly more forgiving about which direction sound amplitude is mapped to than we are with frequency and panning, but I think louder mapped to a higher position on the screen works well here. This could very well be a standard we have simply become used to from all the different applications using this metaphor.

Example07a - higher is faster
Example07b - higher is slower
Here is another kind of mapping that is often overlooked. The connection between frequency and speed of movement, as demonstrated here, is an important and strong one.

Example08a - faster is faster
Example08b - faster is slower
For these next examples we change the SynthDef a little to provide shorter pulses that we can play at any rate. It surprises me how well the faster-is-slower mapping works in this case. This is perhaps only due to the specifics of the example, and the reader might like to investigate the synchronization of the phase of the sound onsets versus the left-right visual position.
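
A minimal sketch of such a SynthDef might look like the following. The name \avpulse and the argument names are assumptions, not the actual Example08 code, and it assumes the server is already booted.

(
SynthDef(\avpulse, {|freq= 400, amp= 0.2, rate= 4, pan= 0|
        var trig= Impulse.kr(rate);                     //short pulses at any rate
        var env= Decay2.kr(trig, 0.005, 0.1);
        Out.ar(0, Pan2.ar(SinOsc.ar(freq, 0, env*amp), pan));
}).add;
)
x= Synth(\avpulse);
x.set(\rate, 8);        //faster pulses
x.free;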

Example09a - brighter is sharper
Example09b - brighter is smoother
These examples investigate whether sounds with a brighter timbre match objects with sharper corners, and whether smoother, less complex sounds fit better with rounded objects. Maybe we are very used to seeing waveforms plotted and cannot see the inverted version (Example09b) in its own right.
    This code is a little bit more complex as we use a function to draw a star shape, varying the number and the size of arms. The arguments for the function are position, number of arms, and outer and inner radius. We also use stroke here instead of fill to draw the segments of lines.
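
The star drawing function could be sketched roughly like this (a hypothetical helper, not the actual Example09 code). A small inner radius gives a spiky, 'sharper' shape; an inner radius close to the outer one gives a nearly round, 'smoother' shape.

(
var star= {|pos, arms, outerRadius, innerRadius|
        Pen.moveTo(Point(pos.x+outerRadius, pos.y));
        arms.do{|i|
                var a1= (i*2+1)/(arms*2)*2pi;   //angle to the next inner point
                var a2= (i+1)/arms*2pi;         //angle to the next outer point
                Pen.lineTo(Point(cos(a1)*innerRadius, sin(a1)*innerRadius)+pos);
                Pen.lineTo(Point(cos(a2)*outerRadius, sin(a2)*outerRadius)+pos);
        };
        Pen.stroke;
};
Window().front.drawFunc= {
        Pen.strokeColor= Color.black;
        star.value(Point(110, 200), 8, 90, 40);         //spiky - 'sharper'
        star.value(Point(290, 200), 8, 90, 85);         //nearly round - 'smoother'
};
)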

Example10a - voices are objects
Example10b - voices not objects
One obvious correlation is to let each visible object have its own unique sound or voice. Then, more objects means more sound in total. As the second example shows, the opposite mapping is quite hard to appreciate.
    In this code we create an array of fifty synths and play them at the same time. The environment variable ~num decides how many of these will be assigned an amplitude above zero and be heard. CPU cost is thus constant rather than dynamically changing, but this technique allows simpler coding. We also give the audible objects a unique frequency and panning position.
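
A minimal, sound-only sketch of this fixed-voices idea follows. The SynthDef name, frequencies and amplitudes are assumptions, not the actual Example10 code; the routine simply mutes every voice above the index given by ~num, which can be changed at runtime.

(
s.waitForBoot{
        var num= 50;
        var synths;
        SynthDef(\avvoice, {|freq= 400, amp= 0, pan= 0|
                Out.ar(0, Pan2.ar(SinOsc.ar(freq, 0, amp.lag(0.1)), pan));
        }).add;
        s.sync;
        synths= {|i| Synth(\avvoice, [\freq, i+1*110, \pan, 1.0.rand2])}!num;
        ~num= 10;       //how many of the fifty voices are audible - change at runtime
        Routine({
                loop{
                        synths.do{|syn, i| syn.set(\amp, if(i<~num, 0.02, 0))};
                        0.1.wait;
                };
        }).play;
};
)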

Example11a - harmonicity is order
Example11b - harmonicity is disorder
This last pair of examples in this section maps equally spaced graphical objects to the harmonic series. The less equally spaced the objects are, the further from the harmonic series their partial frequencies will be. In the inverted version this relationship is flipped and less visual order brings us closer to the overtone series.
    There should be nothing difficult to understand in the code. The ~dist parameter is in percent and will decide how much each object deviates both graphically and from the harmonic series.

5 More mappings

In most of the examples above I believe that the majority of us would agree on which mappings are the most effective. There seem to be some basic rules governing what can be considered good audiovisual mappings after all. Yet, there are many more possible relations to investigate. Out of these, I believe fewer and fewer of us will agree on how well they work. Pitch in relation to color is a difficult area that appears more subjective (maybe because colored hearing is a form of synesthesia; see Alexander and Collins 2007).
    Example12 tries to combine many of the above mappings in a single program. We map amplitude to size, brightness and screen position. Frequency and panning are dependent on screen position and the speed of the object, and the more voices you add (~num) the more graphical objects will appear. Example13 is another little program with multiple parameters mapped across the domains.
    With as many one-to-one parameter mappings as in these examples, the result will obviously not be proportionally more effective in terms of audiovisual correlation. The effect seems to average out after the first few parameters. In both examples, notice how hard it becomes to follow individual objects when there are lots of things happening at the same time. A great number of objects might even lessen the overall correlation effect of direct mapping.
    It is my impression that it is preferable to use a few clear parameter relations that instead are varied over time. A smaller number of visible objects with fewer mappings will result in a stronger impact for those mappings that are present. So rather than trying to relate everything in the music to something showing on screen, pick the most prominent feature of the music (possibly by ear), find a mapping for it and let that be the only thing visible (Collins and Olofsson 2006).
    To keep an audience’s interest with fewer parameters mapped (and avoid ‘Mickey Mousing’), we can let our one-to-one mappings change during the piece. These transitions could be important parameters for defining the form, or to play with when designing interactive installations. For example, imagine an installation with a deliberately odd correlation between the audio and the visuals. As people start to engage more with it, when they start to collaborate and try out new things, the mapping could, as a reward, become more ‘natural’ and direct. This is a subtle but effective way to keep people engaged. They will feel that they gain more control as the system changes.

6 Systems

One common method among audiovisual works is to set up some form of system and let that drive the sound and the graphics. Simple models of physical laws like gravity could be part of such a setup, and Example14 presents a basic particle system with gravity and damping. Systems such as this are often implemented just as audiovisual pieces, as seeing the effect of (say) gravity is something quite different from only hearing it. The eyes will guide the ears. Also, these constructions lead us to control our sounds in ways that might otherwise be harder to conceive.
    For Example14, click and drag with the mouse to create new balls. The balls will bounce around for a little while and then slowly disappear. The Point class is used as a vector to describe direction and velocity, and there is also a function that returns a dictionary for every ball created. Each dictionary stores the unique settings for a given ball (a minimal sketch of this kind of setup follows at the end of this section).
    Example15 is an implementation of a simple but beautiful system John Whitney describes in his book Digital Harmony (Whitney 1980). Whitney was a pioneer in early computer graphics as well as in experimental film. In his system the second ball will rotate at half the speed of the first, the third at half the speed of the second, and so on. The outermost ball will take many minutes to complete a cycle. Each ball has a unique frequency that is heard only once per lap. Complex patterns arise as the string of balls twists and unfolds.
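
Here is a minimal, graphics-only sketch of a particle setup of the Example14 kind, with one dictionary per ball and a Point used as a velocity vector. The gravity and damping constants are assumptions, it only reacts to mouse clicks (not dragging), and there is no sound.

(
var width= 500, height= 500;
var w= Window("balls (sketch)", Rect(99, 99, width, height), false);
var u= UserView(w, Rect(0, 0, width, height));
var balls= [];  //one dictionary (here an Event) per ball
var newBall= {|pos| (\pos: pos, \vel: Point(3.0.rand2, 3.0.rand.neg), \life: 300)};
u.mouseDownAction= {|view, x, y| balls= balls.add(newBall.value(Point(x, y)))};
u.drawFunc= {
        Pen.fillColor= Color.blue;
        balls.do{|b|
                b[\vel]= b[\vel]+Point(0, 0.2);         //gravity pulls downwards
                b[\vel]= b[\vel]*0.995;                 //damping
                b[\pos]= b[\pos]+b[\vel];
                if(b[\pos].y>height, {b[\pos].y= height; b[\vel].y= b[\vel].y.neg*0.8});        //bounce on the floor
                b[\life]= b[\life]-1;
                Pen.fillOval(Rect.aboutPoint(b[\pos], 8, 8));
        };
        balls= balls.reject{|b| b[\life]<=0};           //forget balls whose lifetime has run out
};
u.clearOnRefresh= true;
u.background= Color.white;
u.animate= true;
w.front;
)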

7 Audio analysis

So far we have generated the sounds together with the graphics. Now we look into analysis of the audio signal and how that can be used to drive visuals. Example16 sets up a number of peak filters spread out between 200 and 6000 Hertz. The amplitude of each filter output is tracked and sent back to the program with the help of a SendTrig and OSCFunc pair (a minimal sketch of this band-tracking idea appears at the end of this section). This is a very common technique used in dedicated realtime video programs to extract data from the music, though typically it only analyses the musical surface and not component events. There are also always problems with latency and inaccurate frequency matching. If you generate the music yourself and know the synthesis parameters, for instance using patterns, then it is much better to simply map that data directly and not have to deal with analysis at all.
    Another option is to use FFT and draw sonograms. Example17 shows one way that this can be accomplished. The version with rotation uses a little trick to manually clear the drawing area. If we draw a rectangle covering the whole area and fill it with a semi-transparent color, we get a nice trail effect:

Pen.fillColor= Color.grey(1, ~trails);
Pen.fillRect(Rect(0, 0, width, height));

The closer the ~trails variable gets to zero, the more slowly previous frames will fade out.

Example18 uses yet another technique. It just draws the raw waveform from the source sound. With some rotation and trail effects this simple graphic can get quite interesting.
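
To round off this section, here is a minimal sketch of the band-tracking idea described for Example16 above. It uses only three band-pass filters, takes its input from the first audio input bus, and sends the levels back with SendReply (a close relative of SendTrig that can send several values at once). The band frequencies and the bar-graph drawing are assumptions, not the actual Example16 code.

(
s.waitForBoot{
        var amps= 0!3;
        var width= 500, height= 500;
        var w= Window("band levels (sketch)", Rect(99, 99, width, height), false);
        var u= UserView(w, Rect(0, 0, width, height));
        var o, syn;
        syn= {
                var src= SoundIn.ar(0);         //assumes a signal on the first audio input
                var freqs= #[200, 1100, 6000];
                var levels= Amplitude.kr(BPF.ar(src, freqs, 0.3));
                SendReply.kr(Impulse.kr(30), '/levels', levels);        //send three values, 30 times a second
                Silent.ar;
        }.play(s);
        o= OSCFunc({|msg| amps= msg.copyRange(3, 5)}, '/levels');       //the values start at index 3 of the message
        u.drawFunc= {
                amps.do{|a, i|
                        var h= a.min(1)*height;
                        Pen.fillColor= Color.grey(i/3);
                        Pen.fillRect(Rect(i*width/3, height-h, width/3, h));    //taller bar = louder band
                };
        };
        u.clearOnRefresh= true;
        u.background= Color.white;
        u.animate= true;
        w.onClose= {syn.free; o.free};
        w.front;
};
)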

8 Audiovisual instruments

Example19 shows how to set up MIDI control of a combined audiovisual instrument. I tend to play differently when seeing graphics like this, and I know I start to prefer sounds and sequences that also look good. This process could be thought of as a form of forced synesthesia.
    You might need to edit the settings for the MIDIIn.control function to match your particular MIDI device. By default it expects MIDI controller numbers 1 to 7.
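
A minimal sketch of the MIDI side might look like this, written here with MIDIdef.cc rather than setting MIDIIn.control directly. The ~ctl variable and the controller range are assumptions, not the actual Example19 code; the stored values could then be read inside a drawFunc and mapped to both synth and Pen parameters.

(
MIDIIn.connectAll;
~ctl= 0!7;      //latest value (0.0-1.0) for controllers 1 to 7
MIDIdef.cc(\av, {|val, num|
        if((num>=1) and: {num<=7}, {
                ~ctl[num-1]= val/127;   //store a normalised controller value
        });
});
)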

9 Presenting your work

After finishing your piece you probably want to present it in some way. For projecting with a video projector, I would recommend that you place a black picture as the desktop picture on your external monitor (if your computer supports it). Then create a borderless, non-zoomable window and place it somewhere in the centre (a minimal sketch of this follows at the end of this section). This technique will let you keep your drawing’s dimensions and it will look the same with the projector set to different resolutions.
    There is a fullscreen feature for SuperCollider windows, but note that it might resize your drawings to match the current screen dimensions, and this is often something you want to control yourself. CPU usage will also go up, as drawing in bigger windows requires more work.
    Consider projecting onto other surfaces than the standard rectangular white screen. I myself have had good results with different materials such as transparent mosquito nets, huge machinery wrapped in paper, hanging naked bodies etc. With the clip method you can define a mask for your visuals and have the drawing only happen within those bounds (see Example20):

Pen.moveTo(Point(200, 200));            //move to start position
Pen.lineTo(Point(300, 100));            //define a mask (here triangular)
Pen.lineTo(Point(400, 200));
Pen.lineTo(Point(200, 200));
Pen.clip;       //set the mask for the drawing commands that follow
//continue drawing here
//anything drawn outside the triangle will be clipped automatically

Be prepared to also tune your colors if projecting onto a non-white surface, as they will become tinted.
    In SuperCollider it can be a bit tricky to render your work and save it as a movie file. You could record directly onto a digital video (DV) camera that has an analogue video input (s-video or composite). The camera will also record your sound in sync and will not tax the CPU of the computer. The drawback is that the quality may be far from perfect. Another (emergency-only) option is to film the computer screen with a camera. With an LCD screen the result is not as bad as one would expect, but it is still far from optimal.
    A better option is to capture an area of the screen in realtime with a screen grab/cast program. For Mac OS X there are iShowU and Snapz Pro, for Linux Demorecorder and for Windows Taksi and Fraps. These programs have become efficient and can record both high-quality video and sound in realtime without taking too much of the computer's CPU power.
    For very high resolution and CPU-demanding visuals, you might consider writing your own rendering engine. Edit your program so that it does not draw anything directly but instead collects all the drawing commands in an array. The audio can be rendered separately to a sound file using NRT mode if necessary. For the visuals, you then play back the stored drawing commands at a very slow framerate and use one of the screen recording applications mentioned above to create a high-quality movie (or, if you use Mac OS X, SCImage to write single image files). Finally you combine the movie and the previously recorded sound file in, for example, QuickTime Pro. A marker in the form of a single white frame plus an audio impulse might be needed to get the sync back (or you could timestamp your drawing commands).
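
    The collect-then-replay idea can be sketched roughly like this; the stored shapes, the frame count and the playback rate are placeholders and not taken from any of the examples:

(
var width= 500, height= 300;
var frames= 100.collect{|i| {Pen.fillOval(Rect(i*4, 100, 20, 20))}};  //one stored drawing command per frame
var win= Window("render", Rect(100, 100, width, height), false);
var usr= UserView(win, Rect(0, 0, width, height));
usr.background= Color.black;
usr.frameRate= 2;  //very slow playback - each frame stays up long enough to capture
usr.animate= true;
usr.drawFunc= {
        Pen.fillColor= Color.white;
        frames.wrapAt(usr.frame).value;  //perform one stored drawing command per redraw
};
CmdPeriod.doOnce({win.close});
win.front;
)
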
    But the best option of all is, of course, to distribute your work as open source code. With no loss in quality, your audience can study and learn from your work and, most importantly, you can use generative techniques for variations. The piece can be endless and surprising even for you as its creator. This is something you obviously lose when recording your work into a fixed medium.
    For museums and galleries, consider building a dedicated standalone version of SuperCollider that automatically starts your piece at launch. See the helpfiles on using the startup file and on creating standalone applications.

10 Other options for graphics

If you want to go beyond two-dimensional graphics and use OpenGL or realtime video, then the Pen class will not be sufficient. Also note that Pen is not the best choice if you plan to animate hundreds of objects; it does not perform as well as specialized graphical environments. Good options include MaxMSP/Jitter, PD/Gem, Processing, LuaAV and others. Communication with these programs is easily handled using Open Sound Control.
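
    From sclang you simply send whatever values you like to the external program; the port number and the '/xy' address below are made up and must match what the receiving program expects:

(
~gfx= NetAddr("127.0.0.1", 12002);  //the machine and port the external program listens on
~gfx.sendMsg('/xy', 0.5, 0.25);  //e.g. normalised coordinates extracted from the music
)
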
There is also ScGraph (a graphical server written by Florian Schmidt), SCQuartzComposerView and SCImage (the last two are built into SuperCollider for Mac OS X, see the respective helpfiles). SCQuartzComposerView lets you play and control Quartz Composer compositions within a SuperCollider window. SCImage adds advanced image processing and bitmap operations to SuperCollider via the CoreImage framework. It works well together with Pen and lets you, among other things, write your Pen graphics to disk as single image files (tiff, bmp, jpeg etc.).

11 Ending

Please consider this text and its humble investigation of audiovisual mappings as a starting point for your own experiments. There is much more to explore in the relation of graphics and sound, and lots of room for personal interpretation. The examples here only present a limited set of techniques, but there are parts that I hope can be reused and built upon further. Take them apart, break them and remix the code! In particular, I think the area of personalized combined audiovisual instruments is very interesting; designing and playing these systems completely changes my concept of what visualization and sonification of processes, systems and music can mean.

12 References

Alexander, A., and Collins, N. 2007. Live Audiovisual Performance. In Collins, N., and d'Escrivan, J., eds. The Cambridge Companion to Electronic Music. Cambridge: Cambridge University Press.

Chion, M. 1994. Audio-Vision: Sound on Screen. New York: Columbia University Press. Originally published 1990; translated by Gorbman, C.

Collins, N., and Olofsson, F. 2006. klipp av: Live Algorithmic Splicing and Audiovisual Event Capture. Computer Music Journal 30(2): 8-18.

Whitney, J. 1980. Digital Harmony: On the Complementarity of Music and Visual Art. Peterborough, N.H.: Byte Books/McGraw-Hill.

Audiovisuals with SC by Fredrik Olofsson is licensed under a Creative Commons Attribution-NonCommercial-ShareAlike 3.0 Unported License.

updated 090703: code examples updated for sc3.3.1 and swingosc0.62
updated 120511: code examples updated for sc3.5, cocoa and qt
updated 121231: fixed some links and bugs and updated for sc3.9

red-framework published on googlecode

in june i cleaned up and released my red-framework for managing max/jitter patches. it is hosted here and you can get it via anonymous svn checkout.

the framework is for stacking, chaining and mixing max/jitter patches and shows my way of organising patches. i've been working on/with it since 2006 and it now contains >100 modules. it can handle jitter, control data, midi and also softvns video under max4.5.

you are welcome to join the project if you are interested. it is easy to write your own modules.

installation...

(for osx 10.4 and earlier you'll first need to install svn separately)
in the terminal type:

svn checkout http://red-framework.googlecode.com/svn/trunk/ red-framework-read-only

then press 'p' to accept the server certificate permanently.
finally, add the red-framework folder to max's file preferences.

it is licensed under gnu gpl v2 and requires max5+jitter for osx. it has not been tested on windows xp yet but should run there.

concept...

modules: generators, modulators, outputs
faders: cross, gain, etc.
slots = module+fader
chain = slots in series
stack = slots in parallel
mixer = go from parallel to serial

//--module:
a max/jitter patch following a simple standard
it must have 2 inlets: in, ctrl
and 2 outlets: out, info
the module can be generator, modulator or output

//--slot:
a slot is a fader + a module
slots also have 2 inlets: in, ctrl
and 2 outlets: out, info

//--stack:
builds a stack of slots - serial in and parallel out

//--chain:
builds a chain of slots - serial in and serial out

//--mixer:
a mixer of slots - parallel in and serial out

pros and cons...

why use red-framework?
the same interface for jitter, midi, control data and softvns
reusable patches
generalised and efficient

i have made various bigger performance patches using red-framework
special gui/bpatchers for stacks, chains, mixers

drawbacks?
only discrete events - no msp
no opengl or shaders
too complicated to perform with, so i went back to my old os9 patch
e.g. learning the effect chain rather than re-ordering it!
