<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing with OASIS Tables v3.0 20080202//EN" "journalpub-oasis3.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:oasis="http://docs.oasis-open.org/ns/oasis-exchange/table" xml:lang="en" dtd-version="3.0"><?xmltex \makeatother\@nolinetrue\makeatletter?><?xmltex \bartext{Research article}?>
  <front>
    <journal-meta><journal-id journal-id-type="publisher">GC</journal-id><journal-title-group>
    <journal-title>Geoscience Communication</journal-title>
    <abbrev-journal-title abbrev-type="publisher">GC</abbrev-journal-title><abbrev-journal-title abbrev-type="nlm-ta">Geosci. Commun.</abbrev-journal-title>
  </journal-title-group><issn pub-type="epub">2569-7110</issn><publisher>
    <publisher-name>Copernicus Publications</publisher-name>
    <publisher-loc>Göttingen, Germany</publisher-loc>
  </publisher></journal-meta>
    <article-meta>
      <article-id pub-id-type="doi">10.5194/gc-3-263-2020</article-id><title-group><article-title>Earth system music: music generated from the<?xmltex \hack{\break}?> United Kingdom Earth System Model (UKESM1)</article-title><alt-title>Earth system music</alt-title>
      </title-group><?xmltex \runningauthor{L.~de~Mora et al.}?><?xmltex \runningtitle{Earth system music}?>
      <contrib-group>
        <contrib contrib-type="author" corresp="yes" rid="aff1">
          <name><surname>de Mora</surname><given-names>Lee</given-names></name>
          <email>ledm@pml.ac.uk</email>
        <ext-link>https://orcid.org/0000-0002-5080-3149</ext-link></contrib>
        <contrib contrib-type="author" corresp="no" rid="aff2">
          <name><surname>Sellar</surname><given-names>Alistair A.</given-names></name>
          
        <ext-link>https://orcid.org/0000-0002-2955-7254</ext-link></contrib>
        <contrib contrib-type="author" corresp="no" rid="aff3">
          <name><surname>Yool</surname><given-names>Andrew</given-names></name>
          
        <ext-link>https://orcid.org/0000-0002-9879-2776</ext-link></contrib>
        <contrib contrib-type="author" corresp="no" rid="aff3">
          <name><surname>Palmieri</surname><given-names>Julien</given-names></name>
          
        <ext-link>https://orcid.org/0000-0002-0226-5243</ext-link></contrib>
        <contrib contrib-type="author" corresp="no" rid="aff4 aff5">
          <name><surname>Smith</surname><given-names>Robin S.</given-names></name>
          
        <ext-link>https://orcid.org/0000-0001-7479-7778</ext-link></contrib>
        <contrib contrib-type="author" corresp="no" rid="aff4">
          <name><surname>Kuhlbrodt</surname><given-names>Till</given-names></name>
          
        <ext-link>https://orcid.org/0000-0003-2328-6729</ext-link></contrib>
        <contrib contrib-type="author" corresp="no" rid="aff6 aff7">
          <name><surname>Parker</surname><given-names>Robert J.</given-names></name>
          
        <ext-link>https://orcid.org/0000-0002-0801-0831</ext-link></contrib>
        <contrib contrib-type="author" corresp="no" rid="aff2">
          <name><surname>Walton</surname><given-names>Jeremy</given-names></name>
          
        <ext-link>https://orcid.org/0000-0001-7372-178X</ext-link></contrib>
        <contrib contrib-type="author" corresp="no" rid="aff1">
          <name><surname>Blackford</surname><given-names>Jeremy C.</given-names></name>
          
        </contrib>
        <contrib contrib-type="author" corresp="no" rid="aff8">
          <name><surname>Jones</surname><given-names>Colin G.</given-names></name>
          
        </contrib>
        <aff id="aff1"><label>1</label><institution>Plymouth Marine Laboratory, Plymouth, UK</institution>
        </aff>
        <aff id="aff2"><label>2</label><institution>Met Office Hadley Centre, Exeter, UK</institution>
        </aff>
        <aff id="aff3"><label>3</label><institution>National Oceanography Centre, Southampton, UK</institution>
        </aff>
        <aff id="aff4"><label>4</label><institution>National Centre for Atmospheric Science, Department of Meteorology, University of Reading, Reading, UK</institution>
        </aff>
        <aff id="aff5"><label>5</label><institution>Department of Meteorology, University of Reading, Reading, UK</institution>
        </aff>
        <aff id="aff6"><label>6</label><institution>National Centre for Earth Observation, Leicester, UK</institution>
        </aff>
        <aff id="aff7"><label>7</label><institution>Earth Observation Science, School of Physics and Astronomy, University of Leicester, Leicester, UK</institution>
        </aff>
        <aff id="aff8"><label>8</label><institution>National Centre for Atmospheric Science, School of Earth and Environment, University of Leeds, Leeds, UK</institution>
        </aff>
      </contrib-group>
      <author-notes><corresp id="corr1">Lee de Mora (ledm@pml.ac.uk)</corresp></author-notes><pub-date><day>11</day><month>September</month><year>2020</year></pub-date>
      
      <volume>3</volume>
      <issue>2</issue>
      <fpage>263</fpage><lpage>278</lpage>
      <history>
        <date date-type="received"><day>18</day><month>December</month><year>2019</year></date>
           <date date-type="rev-request"><day>17</day><month>January</month><year>2020</year></date>
           <date date-type="rev-recd"><day>8</day><month>July</month><year>2020</year></date>
           <date date-type="accepted"><day>16</day><month>July</month><year>2020</year></date>
      </history>
      <permissions>
        <copyright-statement>Copyright: © 2020 Lee de Mora et al.</copyright-statement>
        <copyright-year>2020</copyright-year>
      <license license-type="open-access"><license-p>This work is licensed under the Creative Commons Attribution 4.0 International License. To view a copy of this licence, visit <ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">https://creativecommons.org/licenses/by/4.0/</ext-link></license-p></license></permissions><self-uri xlink:href="https://gc.copernicus.org/articles/3/263/2020/gc-3-263-2020.html">This article is available from https://gc.copernicus.org/articles/3/263/2020/gc-3-263-2020.html</self-uri><self-uri xlink:href="https://gc.copernicus.org/articles/3/263/2020/gc-3-263-2020.pdf">The full text article is available as a PDF file from https://gc.copernicus.org/articles/3/263/2020/gc-3-263-2020.pdf</self-uri>
      <abstract><title>Abstract</title>
    <p id="d1e214">Scientific data are almost always represented graphically in figures or in videos.
With the ever-growing interest from the general public in understanding
climate sciences, it is becoming increasingly important that scientists present this
information in ways that are both accessible and engaging to non-experts.</p>
    <p id="d1e217">In this pilot study, we use time series data from the first United Kingdom Earth System Model (UKESM1) to
create six procedurally generated musical pieces.
Each of these pieces presents a unique aspect of the ocean component of the UKESM1,
either in terms of a scientific principle or a practical aspect of modelling.
In addition, each piece is arranged using a different musical progression, style and tempo.</p>
    <p id="d1e220">These pieces were created in the Musical Instrument Digital Interface (MIDI) format
and then performed by a digital piano synthesiser.
An associated video showing the time development of the data in time with the music was also created.
The music and video were published on the lead author's YouTube channel.
A brief description of the methodology was also posted alongside the video.
We also discuss the limitations of this pilot study and describe
several approaches to extend and expand upon this work.</p>
  </abstract>
    </article-meta>
  </front>
<body>
      

<sec id="Ch1.S1" sec-type="intro">
  <label>1</label><title>Introduction</title>
      <p id="d1e232">The use of non-speech audio to convey information is known as sonification.
One of the earliest and perhaps the most well known applications of sonification in science is the Geiger counter, a device which produces a distinctive clicking sound when it interacts with ionising radiation
<xref ref-type="bibr" rid="bib1.bibx28" id="paren.1"/>.
Beyond the Geiger counter, sonification is also widely used in monitoring instrumentation.
Sonification is appropriate when the information being displayed
changes in time, includes warnings, or calls for immediate action.
Sonification instrumentation is used in environments where the operator
is unable to use a visual display, for instance if the visual system
is busy with another task, overtaxed or when factors such as smoke, light or line of sight impact the operator's visual system <xref ref-type="bibr" rid="bib1.bibx39" id="paren.2"/>.
Sonification also allows several metrics to be displayed simultaneously
using variations in pitch, timbre, volume and period <xref ref-type="bibr" rid="bib1.bibx24 bib1.bibx13" id="paren.3"/>.
For these reasons, sonification is widely used in medicine for monitoring crucial metrics of patient health <xref ref-type="bibr" rid="bib1.bibx7 bib1.bibx19 bib1.bibx29" id="paren.4"/>.</p>
      <?pagebreak page264?><p id="d1e247">Outside of sonification for monitoring purposes, the sonification
of data can also be used to produce music.
There have been several examples of sonification of climate system data.
“Climate symphony” by Disobedient Films <xref ref-type="bibr" rid="bib1.bibx1" id="paren.5"/>
is a musical composition performed by strings and piano
using observational data from sea ice indices,
surface temperature and carbon dioxide concentration.
Daniel Crawford's “Planetary bands, warming world” <xref ref-type="bibr" rid="bib1.bibx8" id="paren.6"/> is a string quartet
which uses observational data from Northern Hemisphere temperatures.
In this piece, each of the four stringed parts represents
a different latitude band of the Northern Hemisphere temperature over the time range 1880–2012.
Similarly, the Climate Music Project (<uri>https://climatemusic.org/</uri>, last access: 17 August 2020)
is a project which makes original music inspired by climate science.
They have produced three pieces which cover a wide range of climatological
and demographic data and both observational and simulated data.
However, pieces such as those by <xref ref-type="bibr" rid="bib1.bibx1" id="text.7"/> and <xref ref-type="bibr" rid="bib1.bibx8" id="text.8"/> often use similar
observational temperature and carbon dioxide data sets.
Both of these data sets only have monthly data, and approximately one century of data or less are available.
In addition, both temperature and carbon dioxide have risen since the start
of the observational record.
This means that these musical pieces tend to have similar structures and sounds.
The pieces are slow, quiet and low pitched at the start of the data set before slowly increasing and building up to a high-pitched conclusion at the present day.
It should be noted that all the pieces listed here are also accompanied by a video which
explains the methodology behind the creation of the music,
shows the performance by the artists
or shows the data development while the music is played.</p>
      <p id="d1e265">An alternative strategy was deployed in the Sounding Coastal Change project <xref ref-type="bibr" rid="bib1.bibx25" id="paren.9"/>.
In that work, sound works, music recordings, photography and film produced through the project
were geotagged and shared on a sound map.
This created a record of the changing
social and environmental soundscape of North Norfolk.
They used these sounds to create music and explore the ways in which
the coast was changing and how people's lives were changing with it.</p>
      <p id="d1e271">In addition to its practical applications, sonification is a
unique field in which scientific and artistic
purposes may coexist <xref ref-type="bibr" rid="bib1.bibx36" id="paren.10"/>.
This is especially true when, in addition to being converted into sound,
the data are also converted into music.
This branch of sonification is called musification.
Note that the philosophical distinction between sound and music is beyond the
scope of this work.
Through the choice of musical scales and chords,
tempo, timbre and volume dynamics,
the composer can attempt to add emotive meaning to the piece.
As such, unlike sonification, musification should be treated as a potentially
biased interpretation of the underlying data.
It cannot be both musical and a truly objective representation of the data.
Furthermore, even though the composer may have made musical and artistic
decisions to link the behaviour of the data with a specific emotional response, it may not necessarily be interpreted in the same way by the
listener.</p>
      <p id="d1e278">With the ever-growing interest from the general public in understanding
climate science, it is becoming increasingly important that we present
our model results and methods
in ways that are accessible and engaging to non-experts.
In this work, six musical pieces
were procedurally generated using output from a climate model,
specifically the first version of the United Kingdom Earth System Model <xref ref-type="bibr" rid="bib1.bibx31" id="paren.11"><named-content content-type="pre">UKESM1;</named-content></xref>.
By using simulated data instead of observational data,
we can generate music from time periods outside the recent past,
such as the pre-industrial period before 1850 and multiple projections of possible future climates.
Similarly, model data allow access to regions and measurements far
beyond what can be found in the observational record.
The UKESM1 is a current generation computational simulation
of the Earth's climate and has been
deployed to understand the historical behaviour of the climate system and make projections
of the climate in the future.
The UKESM1 is described in more detail in Sect. <xref ref-type="sec" rid="Ch1.S2"/>.
The methodology used to produce the pieces, and a brief summary of each piece, is shown in Sect. <xref ref-type="sec" rid="Ch1.S3"/>.
The aims of the project are outlined below in Sect. <xref ref-type="sec" rid="Ch1.S4"/>.</p>
      <p id="d1e292">Each of the six musical pieces was produced alongside a video showing the time series data developing concurrently with the music.
These videos were published on the YouTube video hosting service.
This work was an early pilot study and has revealed several limitations which we outline in Sect. <xref ref-type="sec" rid="Ch1.S5"/>.
We also include some possible extensions, improvements and new directions
for future versions of the work.</p>
</sec>
<sec id="Ch1.S2">
  <label>2</label><title>UKESM1</title>
      <p id="d1e305">The UKESM1 is a computational simulation of the Earth
system produced by a collaboration between the Hadley
Centre Met Office from the United Kingdom and the Natural
Environment Research Council <xref ref-type="bibr" rid="bib1.bibx31" id="paren.12"><named-content content-type="pre">NERC;</named-content></xref>.
The UKESM1 represents a major advancement in Earth system modelling,
including a new atmospheric circulation model with a well resolved stratosphere;
terrestrial biogeochemistry with coupled carbon and nitrogen cycles and enhanced land management;
troposphere–stratospheric chemistry that allows the simulation of radiative forcing from ozone, methane and nitrous oxide;
a fully featured aerosol model;
and an ocean biogeochemistry model with two-way coupling to the carbon cycle and atmospheric aerosols.
The complexity of coupling between the ocean, land and atmosphere physical
climate and biogeochemical cycles in UKESM1 is unprecedented for an Earth system model.</p>
      <?pagebreak page265?><p id="d1e313">In this work, we have exclusively used data from the ocean component of the UKESM1.
The UKESM1's ocean is subdivided into three component models, namely
the Nucleus for European Modelling of the Ocean (NEMO), which simulates the ocean circulation
and thermodynamics <xref ref-type="bibr" rid="bib1.bibx33" id="paren.13"/>,
the Model of Ecosystem Dynamics, nutrient Utilisation, Sequestration and Acidification (MEDUSA), which is the sub-model of the marine biogeochemistry <xref ref-type="bibr" rid="bib1.bibx41" id="paren.14"/>,
and the Los Alamos Sea Ice Model (CICE), which simulates the growth, melt and movement of sea ice <xref ref-type="bibr" rid="bib1.bibx26" id="paren.15"/>.</p>

      <?xmltex \floatpos{t}?><fig id="Ch1.F1" specific-use="star"><?xmltex \currentcnt{1}?><label>Figure 1</label><caption><p id="d1e327">The computational process used to convert UKESM1 data into a musical piece and an associated video.
The boxes with a dark border represent files and data sets, and the arrows and chevrons represent processes.
The blue areas are UKESM1 data and the preprocessing stages,
the green areas show the data and processing stages needed to convert model data into music in the MIDI format,
and the orange area shows the post-processing stages which convert images and MIDI into sheet music and videos.
</p></caption>
        <?xmltex \igopts{width=398.338583pt}?><graphic xlink:href="https://gc.copernicus.org/articles/3/263/2020/gc-3-263-2020-f01.png"/>

      </fig>

      <p id="d1e337">The UKESM1 is being used in the UK's contribution to the sixth international Coupled Model Intercomparison Project (CMIP6)
<xref ref-type="bibr" rid="bib1.bibx11" id="paren.16"/>.
The UKESM1 simulations that were submitted to the CMIP6 were used to generate the musical pieces.
These simulations include the pre-industrial control (PI control), several
historical simulations and many projections of future climate scenarios.
The CMIP6 experiments that were used in these works are listed in Table <xref ref-type="table" rid="Ch1.T1"/>.</p>
      <p id="d1e345">This is not the first time that the UKESM1 has been used to inspire creative projects. In 2017, the UKESM1 participated in a science and poetry project in which a scientist and a writer were paired together to produce poetry.
Ben Smith was paired with Lee de Mora and produced several poems inspired
by the United Kingdom Earth System Model <xref ref-type="bibr" rid="bib1.bibx32" id="paren.17"><named-content content-type="pre">UKESM;</named-content></xref>.</p>
</sec>
<sec id="Ch1.S3">
  <label>3</label><title>Methods</title>
      <p id="d1e361">In this section, we describe the method used to produce the music and the videos.
Figure <xref ref-type="fig" rid="Ch1.F1"/> illustrates this process.
The initial data are UKESM1 model output files, downloaded directly from the United Kingdom's
Met Office data storage system (MASS).
These native-format UKESM1 data will not be available outside the UKESM collaboration,
but selected model variables have been transformed into a standard format and made available
on the Earth System Grid Federation (ESGF) via, for example,
<uri>https://esgf-index1.ceda.ac.uk/search/cmip6-ceda/</uri>, last access: 17 August 2020.</p>
      <p id="d1e369">The time series data are calculated from the UKESM1 data by the BGC-val model evaluation suite <xref ref-type="bibr" rid="bib1.bibx10" id="paren.18"/>.
BGC-val is a software toolkit that was deployed to evaluate the development and performance
of the ocean component of the UKESM1.
In all six pieces, we use annual average data as the time series data.
The data sets that were used in this work are listed in Table <xref ref-type="table" rid="Ch1.T1"/>.</p>
      <p id="d1e377">Each time series data set is used to create an individual Musical Instrument Digital Interface (MIDI) track composed of a series of MIDI notes.
The MIDI protocol is a standardised digital way to convey musical performance information. It can be thought of as the instructions that tell a music synthesiser how to perform a piece of music <xref ref-type="bibr" rid="bib1.bibx34" id="paren.19"/>.
All six pieces shown here are saved as a single MIDI file which contains one or many MIDI tracks played simultaneously. Each MIDI track is composed of a series of MIDI notes.</p>

      <?xmltex \floatpos{t}?><fig id="Ch1.F2" specific-use="star"><?xmltex \currentcnt{2}?><label>Figure 2</label><caption><p id="d1e386">The musical range of each of the data sets used in the “Earth System Allegro”.
The four histograms on the left-hand side show the distributions of the data used in the piece, and the right-hand side shows a standard piano keyboard with the musical range available in each data set.
In this piece, the Drake Passage current, shown in red, is free to vary within a two octave range of the C major scale.
The other three data sets have their own ranges but are limited to the notes in the chord progression, namely C major, G major, A minor and F major.
The dark coloured keys are the notes in C major chord, but the lighter coloured keys show the other notes which are available for the other chords in the progression.
Note that both the C major scale and chord do not include any of the ebony keys on a piano, but these notes could be used if they were within the available range and appeared in the chord progression used.</p></caption>
        <?xmltex \igopts{width=503.61378pt}?><graphic xlink:href="https://gc.copernicus.org/articles/3/263/2020/gc-3-263-2020-f02.png"/>

      </fig>

      <p id="d1e395">Each MIDI note is assigned four parameters.
The first two parameters are timing (when the note occurs in the song)
and duration (the length of time that the note is held).
The timing is the number of beats between this note and the beginning of the song. The duration is a positive rational number representing the number of beats for which the note is held. A unity duration is equivalent to a crotchet (quarter note), a duration of two is a minim (half note) and the duration value of a half is a quaver (eighth note).</p>
      <p id="d1e398">The third MIDI note parameter is the pitch which, in MIDI, must be an integer between  1 and 127, where 1 is a very low pitch and 127 is a very high pitch.
These integer values represent the chromatic scale, and middle C is set to a value of 60. The pitch of the MIDI notes must be an integer as there is no capacity for MIDI notes to sit between values on the chromatic scale. Musically this can be explained, as there are no notes in between the notes on a keyboard in MIDI. The total range of available pitches covers 10.5 octaves;
however, we found that pitches below 30 or above 110 started to become unpleasant when performed by TiMidity; other MIDI pianos may have more success. Also note that MIDI's 127 note system extends beyond the standard piano keyboard, which only covers the range 21–108 of the MIDI pitch system.
MIDI uses the 12-tone equal temperament tuning system; while this is not the only tuning system, it is the most widely used in Western music.</p>
      <p id="d1e401">The fourth MIDI note parameter is the velocity; this indicates the speed with which the key would be struck on a piano and is the relative loudness of the note. In practical terms, velocity is an integer ranging between 1 and 127, where 1 is very quiet and 127 is very loud. The overall tempo of the piece is assigned as a global parameter of the MIDI file in units of the number of beats per minute.</p>
      <?pagebreak page266?><p id="d1e404">Each model's time series data set is converted into a series of consecutive MIDI notes, which together form a track. For instance, the sea surface temperature (SST) time series could be converted into a series of MIDI notes in the upper range of the keyboard to form a track. For each track, the time series data are converted into musical notes so that the lowest
value in the data set is represented by the lowest note pitch available,
and the highest value in the data set is represented by the highest note pitch available. The notes in between are assigned proportionally by their data value between the lowest and highest pitched notes. The lowest and highest notes available for each track are predefined in the piece's settings, and they are considered an artistic decision. Each track is given its own customised pitch range so that the tracks may be at a lower pitch, higher pitch or have overlapping pitch ranges relative to other tracks in the piece. The range of notes available for the piece “Earth System Allegro”
is shown in Fig. <xref ref-type="fig" rid="Ch1.F2"/>. In this figure, the four histograms on the left-hand side show the distributions of the data used in the piece, and the right-hand side includes four standard piano keyboards showing the musical range available in each data set. For instance, the Drake Passage current ranges between 135 and 175 Tg s<inline-formula><mml:math id="M1" display="inline"><mml:msup><mml:mi/><mml:mrow><mml:mo>-</mml:mo><mml:mn mathvariant="normal">1</mml:mn></mml:mrow></mml:msup></mml:math></inline-formula>
in these simulations, and we selected a range between MIDI pitches 72 and 96.
This means that the lowest Drake Passage current values (135 Tg s<inline-formula><mml:math id="M2" display="inline"><mml:msup><mml:mi/><mml:mrow><mml:mo>-</mml:mo><mml:mn mathvariant="normal">1</mml:mn></mml:mrow></mml:msup></mml:math></inline-formula>) would be represented in MIDI with a pitch of 72,
and the highest Drake Passage current values (175 Tg s<inline-formula><mml:math id="M3" display="inline"><mml:msup><mml:mi/><mml:mrow><mml:mo>-</mml:mo><mml:mn mathvariant="normal">1</mml:mn></mml:mrow></mml:msup></mml:math></inline-formula>)
would be assigned a MIDI pitch of 96, which is two octaves higher.</p>
      <p id="d1e445">These note pitches are then binned into a scale or a chord.
The choice of chord or scale depends on the artistic decisions made by the composer. For instance, the C major chord is composed of the notes C, E and G, which are the
zeroth, fourth and seventh notes, respectively, in the 12-note chromatic scale, starting from C at zero. Figure <xref ref-type="fig" rid="Ch1.F3"/> shows a representation of these notes on a standard piano keyboard. The C major chord in the zeroth octave is composed of the following set of MIDI pitch integers:
          <disp-formula id="Ch1.E1" content-type="numbered"><label>1</label><mml:math id="M4" display="block"><mml:mrow><mml:mi mathvariant="normal">C</mml:mi><mml:mspace linebreak="nobreak" width="0.25em"/><mml:msub><mml:mi mathvariant="normal">major</mml:mi><mml:mn mathvariant="normal">0</mml:mn></mml:msub><mml:mo>=</mml:mo><mml:mo mathvariant="italic">{</mml:mo><mml:mn mathvariant="normal">0</mml:mn><mml:mo>,</mml:mo><mml:mn mathvariant="normal">4</mml:mn><mml:mo>,</mml:mo><mml:mn mathvariant="normal">7</mml:mn><mml:mo mathvariant="italic">}</mml:mo><mml:mo>.</mml:mo></mml:mrow></mml:math></disp-formula></p>

      <?xmltex \floatpos{t}?><fig id="Ch1.F3"><?xmltex \currentcnt{3}?><label>Figure 3</label><caption><p id="d1e485">A depiction of a standard piano keyboard showing the names of the notes and the number of these notes in MIDI format. The C major chord is highlighted in green, and the zeroth octave is shown in a darker         green than the subsequent octaves.</p></caption>
        <?xmltex \igopts{width=241.848425pt}?><graphic xlink:href="https://gc.copernicus.org/articles/3/263/2020/gc-3-263-2020-f03.png"/>

      </fig>

      <p id="d1e494">In the 12-tone equal temperament tuning system, the 12 named notes are repeated, and each distance of 12 notes represents an octave.
As shown in Fig. <xref ref-type="fig" rid="Ch1.F3"/>, a chord may also include notes from subsequent octaves. In this figure, the C major chord is highlighted in green, and the zeroth octave is shown in a darker
green than the subsequent octaves.
As such, the C major chord can be formed from any of the following sets of MIDI pitches:
          <disp-formula id="Ch1.E2" content-type="numbered"><label>2</label><mml:math id="M5" display="block"><mml:mrow><mml:mi mathvariant="normal">C</mml:mi><mml:mspace linebreak="nobreak" width="0.25em"/><mml:msub><mml:mi mathvariant="normal">major</mml:mi><mml:mrow><mml:mn mathvariant="normal">0</mml:mn><mml:mo>,</mml:mo><mml:mn mathvariant="normal">1</mml:mn><mml:mo>,</mml:mo><mml:mn mathvariant="normal">2</mml:mn><mml:mo>,</mml:mo><mml:mi mathvariant="normal">…</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mo mathvariant="italic">{</mml:mo><mml:mn mathvariant="normal">0</mml:mn><mml:mo>,</mml:mo><mml:mn mathvariant="normal">4</mml:mn><mml:mo>,</mml:mo><mml:mn mathvariant="normal">7</mml:mn><mml:mo>,</mml:mo><mml:mn mathvariant="normal">12</mml:mn><mml:mo>,</mml:mo><mml:mn mathvariant="normal">16</mml:mn><mml:mo>,</mml:mo><mml:mn mathvariant="normal">19</mml:mn><mml:mo>,</mml:mo><mml:mn mathvariant="normal">24</mml:mn><mml:mo>,</mml:mo><mml:mn mathvariant="normal">28</mml:mn><mml:mo>,</mml:mo><mml:mn mathvariant="normal">31</mml:mn><mml:mi mathvariant="normal">…</mml:mi><mml:mn mathvariant="normal">127</mml:mn><mml:mo mathvariant="italic">}</mml:mo><mml:mo>.</mml:mo></mml:mrow></mml:math></disp-formula></p>
      <p id="d1e573">It then follows that the notes of the C major chord are values between 0 and 127, where the following condition is true:
          <disp-formula id="Ch1.Ex1"><mml:math id="M6" display="block"><mml:mrow><mml:mi>p</mml:mi><mml:mo>∈</mml:mo><mml:mi mathvariant="normal">C</mml:mi><mml:mspace width="0.25em" linebreak="nobreak"/><mml:msub><mml:mi mathvariant="normal">major</mml:mi><mml:mrow><mml:mn mathvariant="normal">0</mml:mn><mml:mo>,</mml:mo><mml:mn mathvariant="normal">1</mml:mn><mml:mo>,</mml:mo><mml:mn mathvariant="normal">2</mml:mn><mml:mo>,</mml:mo><mml:mi mathvariant="normal">…</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:math></disp-formula>
        This can be written more simply as follows:
          <disp-formula id="Ch1.Ex2"><mml:math id="M7" display="block"><mml:mrow><mml:mi>p</mml:mi><mml:mi mathvariant="italic">%</mml:mi><mml:mn mathvariant="normal">12</mml:mn><mml:mo>∈</mml:mo><mml:mi mathvariant="normal">C</mml:mi><mml:mspace linebreak="nobreak" width="0.25em"/><mml:msub><mml:mi mathvariant="normal">major</mml:mi><mml:mn mathvariant="normal">0</mml:mn></mml:msub><mml:mo>,</mml:mo></mml:mrow></mml:math></disp-formula>
        where <inline-formula><mml:math id="M8" display="inline"><mml:mi>p</mml:mi></mml:math></inline-formula> represents the pitch value, namely an integer between the minimum and maximum pitches provided in the settings, and the percent sign (%) represents the remainder operator.</p>
      <p id="d1e638">The zeroth octave values for other chords and scales with the same root note can be calculated from their chromatic relation with the root note.
For instance:

              <disp-formula specific-use="gather"><mml:math id="M9" display="block"><mml:mtable displaystyle="true"><mml:mtr><mml:mtd><mml:mrow><mml:mstyle displaystyle="true" class="stylechange"/><mml:mi mathvariant="normal">C</mml:mi><mml:mspace linebreak="nobreak" width="0.25em"/><mml:msub><mml:mi mathvariant="normal">minor</mml:mi><mml:mn mathvariant="normal">0</mml:mn></mml:msub><mml:mo>=</mml:mo><mml:mo mathvariant="italic">{</mml:mo><mml:mn mathvariant="normal">0</mml:mn><mml:mo>,</mml:mo><mml:mn mathvariant="normal">3</mml:mn><mml:mo>,</mml:mo><mml:mn mathvariant="normal">7</mml:mn><mml:mo mathvariant="italic">}</mml:mo></mml:mrow></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mrow><mml:mstyle displaystyle="true" class="stylechange"/><mml:mi mathvariant="normal">C</mml:mi><mml:mspace width="0.25em" linebreak="nobreak"/><mml:msubsup><mml:mi mathvariant="normal">major</mml:mi><mml:mn mathvariant="normal">0</mml:mn><mml:mn mathvariant="normal">7</mml:mn></mml:msubsup><mml:mo>=</mml:mo><mml:mo mathvariant="italic">{</mml:mo><mml:mn mathvariant="normal">0</mml:mn><mml:mo>,</mml:mo><mml:mn mathvariant="normal">4</mml:mn><mml:mo>,</mml:mo><mml:mn mathvariant="normal">7</mml:mn><mml:mo>,</mml:mo><mml:mn mathvariant="normal">11</mml:mn><mml:mo mathvariant="italic">}</mml:mo></mml:mrow></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mrow><mml:mstyle displaystyle="true" class="stylechange"/><mml:mi mathvariant="normal">C</mml:mi><mml:mspace linebreak="nobreak" width="0.25em"/><mml:msubsup><mml:mi mathvariant="normal">minor</mml:mi><mml:mn mathvariant="normal">0</mml:mn><mml:mn mathvariant="normal">7</mml:mn></mml:msubsup><mml:mo>=</mml:mo><mml:mo mathvariant="italic">{</mml:mo><mml:mn mathvariant="normal">0</mml:mn><mml:mo>,</mml:mo><mml:mn mathvariant="normal">3</mml:mn><mml:mo>,</mml:mo><mml:mn mathvariant="normal">7</mml:mn><mml:mo>,</mml:mo><mml:mn mathvariant="normal">10</mml:mn><mml:mo 
mathvariant="italic">}</mml:mo></mml:mrow></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mrow><mml:mstyle displaystyle="true" class="stylechange"/><mml:mi mathvariant="normal">…</mml:mi></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>

          Note that the derivation of these chords and their nomenclature is beyond the
scope of this work. For more information<?pagebreak page267?> on music theory, please consult an introductory guide to music theory such as <xref ref-type="bibr" rid="bib1.bibx30" id="text.20"/> or <xref ref-type="bibr" rid="bib1.bibx3" id="text.21"/>.</p>
      <p id="d1e758">The zeroth octave values for other keys can be included by appending the root note of the scale (C: 0, C#/Db: 1, D: 2, D#/Eb: 3 and so on) to the relationships in the key of C above.
<?xmltex \hack{\newpage}?>

For instance:

              <disp-formula specific-use="gather"><mml:math id="M10" display="block"><mml:mtable displaystyle="true"><mml:mtr><mml:mtd><mml:mrow><mml:mstyle displaystyle="true" class="stylechange"/><mml:mi mathvariant="normal">C</mml:mi><mml:mspace linebreak="nobreak" width="0.25em"/><mml:msub><mml:mi mathvariant="normal">major</mml:mi><mml:mn mathvariant="normal">0</mml:mn></mml:msub><mml:mo>=</mml:mo><mml:mo mathvariant="italic">{</mml:mo><mml:mn mathvariant="normal">0</mml:mn><mml:mo>,</mml:mo><mml:mn mathvariant="normal">4</mml:mn><mml:mo>,</mml:mo><mml:mn mathvariant="normal">7</mml:mn><mml:mo mathvariant="italic">}</mml:mo></mml:mrow></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mrow><mml:mstyle class="stylechange" displaystyle="true"/><mml:mi mathvariant="normal">C</mml:mi><mml:mi mathvariant="italic">#</mml:mi><mml:mspace width="0.25em" linebreak="nobreak"/><mml:msub><mml:mi mathvariant="normal">major</mml:mi><mml:mn mathvariant="normal">0</mml:mn></mml:msub><mml:mo>=</mml:mo><mml:mo mathvariant="italic">{</mml:mo><mml:mn mathvariant="normal">0</mml:mn><mml:mo>,</mml:mo><mml:mn mathvariant="normal">4</mml:mn><mml:mo>,</mml:mo><mml:mn mathvariant="normal">7</mml:mn><mml:mo mathvariant="italic">}</mml:mo><mml:mo>+</mml:mo><mml:mn mathvariant="normal">1</mml:mn><mml:mo>=</mml:mo><mml:mo mathvariant="italic">{</mml:mo><mml:mn mathvariant="normal">1</mml:mn><mml:mo>,</mml:mo><mml:mn mathvariant="normal">5</mml:mn><mml:mo>,</mml:mo><mml:mn mathvariant="normal">8</mml:mn><mml:mo mathvariant="italic">}</mml:mo></mml:mrow></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mrow><mml:mstyle displaystyle="true" class="stylechange"/><mml:mi mathvariant="normal">D</mml:mi><mml:mspace width="0.25em" linebreak="nobreak"/><mml:msub><mml:mi mathvariant="normal">major</mml:mi><mml:mn mathvariant="normal">0</mml:mn></mml:msub><mml:mo>=</mml:mo><mml:mo mathvariant="italic">{</mml:mo><mml:mn mathvariant="normal">0</mml:mn><mml:mo>,</mml:mo><mml:mn 
mathvariant="normal">4</mml:mn><mml:mo>,</mml:mo><mml:mn mathvariant="normal">7</mml:mn><mml:mo mathvariant="italic">}</mml:mo><mml:mo>+</mml:mo><mml:mn mathvariant="normal">2</mml:mn><mml:mo>=</mml:mo><mml:mo mathvariant="italic">{</mml:mo><mml:mn mathvariant="normal">2</mml:mn><mml:mo>,</mml:mo><mml:mn mathvariant="normal">6</mml:mn><mml:mo>,</mml:mo><mml:mn mathvariant="normal">9</mml:mn><mml:mo mathvariant="italic">}</mml:mo></mml:mrow></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mrow><mml:mstyle class="stylechange" displaystyle="true"/><mml:mi mathvariant="normal">D</mml:mi><mml:mi mathvariant="italic">#</mml:mi><mml:mspace linebreak="nobreak" width="0.25em"/><mml:msub><mml:mi mathvariant="normal">major</mml:mi><mml:mn mathvariant="normal">0</mml:mn></mml:msub><mml:mo>=</mml:mo><mml:mo mathvariant="italic">{</mml:mo><mml:mn mathvariant="normal">0</mml:mn><mml:mo>,</mml:mo><mml:mn mathvariant="normal">4</mml:mn><mml:mo>,</mml:mo><mml:mn mathvariant="normal">7</mml:mn><mml:mo mathvariant="italic">}</mml:mo><mml:mo>+</mml:mo><mml:mn mathvariant="normal">3</mml:mn><mml:mo>=</mml:mo><mml:mo mathvariant="italic">{</mml:mo><mml:mn mathvariant="normal">3</mml:mn><mml:mo>,</mml:mo><mml:mn mathvariant="normal">7</mml:mn><mml:mo>,</mml:mo><mml:mn mathvariant="normal">10</mml:mn><mml:mo mathvariant="italic">}</mml:mo></mml:mrow></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mrow><mml:mstyle displaystyle="true" class="stylechange"/><mml:mi mathvariant="normal">…</mml:mi></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula></p>
      <p id="d1e956">Using these methods, we can combinatorially create a list of all the MIDI
pitches in the zeroth octave for all 12 keys for most standard musical chords. From this list, we can convert model data into nearly any choice of chord or scale.</p>
      <p id="d1e960">The conversion from model data to musical pitch is performed using the
following method. First, the data are translated into the pitch scale but kept as a rational number between the minimum and maximum pitch range assigned by the composer for this data set. As an example, in the piece “Earth System Allegro” the Drake Passage current was
assigned a pitch range between 72 and 96, as shown in Fig. <xref ref-type="fig" rid="Ch1.F2"/>.
Once the set of possible integer pitches for a given chord or scale has
been produced using the methods described above, the in-scale MIDI pitch with the smallest distance to this rational number pitch is used.
As mentioned earlier, the pitch of<?pagebreak page268?> the MIDI notes must be an integer as there is no capacity for MIDI notes to sit between values on the chromatic scale. The choice of scale is provided in the piece's settings and is an artistic choice made by the composer. Furthermore, instead of using a single chord or scale for a piece, it is also possible to use a repeating pattern of chords or a chord progression. The choice of chords, and the order of chords, is different for each piece. In addition, the number of beats between chord changes, and the number of notes per beat, is also assigned in the settings. Furthermore, each track in a given piece may use a different chord progression.</p>
      <p id="d1e965">The velocity of notes is determined using a similar method to pitch; the time series data are converted into velocities so that the lowest value in the data set is the quietest value available, and the highest value of the data set is the loudest value available. The notes in between are assigned proportionally by their data value between the quietest and loudest notes.
Each track may have its own customised velocity range, such that any given track may be louder or quieter than the other tracks in a piece.
The choice of data set used to determine velocity is provided in the settings. We rarely used the same data set for both pitch and velocity.
This is because it results in the high-pitch notes being louder
and the low-pitch notes being quieter.</p>
      <p id="d1e968">After binning the notes into the appropriate scales, all notes are initially the same duration. If the same pitched note is played successively, then the first note's duration is extended and the repeated notes are removed.</p>
      <p id="d1e971">A smoothing function may also be applied to the data before the data set is converted into musical notes. Smoothing means that it is more likely that the same pitched note will be played successively, so a track with a larger smoothing window will have fewer notes than a track with a smaller window.
From a musical perspective, smoothing slows down the piece by replacing fast short notes with longer slower notes. Smoothing can also be used to slow down the backing parts to highlight a faster moving melody. Nearly all the pieces described here used a smoothing window.</p>
      <p id="d1e974">After applying this method to multiple tracks, they are saved together
in a single MIDI file using the Python MIDITime library <xref ref-type="bibr" rid="bib1.bibx5" id="paren.22"/>. Having created the MIDI file, the piece is performed by the TiMidity<inline-formula><mml:math id="M11" display="inline"><mml:mrow><mml:mo>+</mml:mo><mml:mo>+</mml:mo></mml:mrow></mml:math></inline-formula> digital piano <xref ref-type="bibr" rid="bib1.bibx15" id="paren.23"/>, which converts the MIDI format into
a digital audio performance in the MP3 format. In principle, it should be possible to use alternative MIDI instruments, but for this limited study we exclusively used the TiMidity<inline-formula><mml:math id="M12" display="inline"><mml:mrow><mml:mo>+</mml:mo><mml:mo>+</mml:mo></mml:mrow></mml:math></inline-formula> digital piano. Where possible, the MIDI files were converted into sheet music portable document format (PDF) files using the MuseScore software <xref ref-type="bibr" rid="bib1.bibx20" id="paren.24"/>. However, it is not possible to produce sheet music for all six pieces as some have too many MIDI tracks to be converted to sheet music by this software.</p>
      <p id="d1e1006">Each piece has a diverse range of settings and artistic choices made by the composer, including the choice of data sets used to determine pitch and velocity for each track, the pitch and velocity ranges for each track,
the piece's tempo and the number of notes per beat, the musical key and chord progression for each track, and the width of the smoothing window.
The choice of instrument is also another artistic choice, although in this work only one instrument was used, namely the TiMidity<inline-formula><mml:math id="M13" display="inline"><mml:mrow><mml:mo>+</mml:mo><mml:mo>+</mml:mo></mml:mrow></mml:math></inline-formula> piano synthesiser.
As a whole, these decisions allow the composer to attempt to define the emotional context of the final piece. For instance, a fast-paced piece in a major progression may sound happy and cheerful to an audience who are used to associating fast-paced songs in major keys with happy and cheerful environments. It should be mentioned that there are no strict rules governing the emotional context of chords, tempo or instrument, and the emotional contexts of harmonies, timbres and tempos differ between cultures.
Nevertheless, through exploiting the standard behaviours of Western musical traditions, the composer can attempt to imbue the piece with emotional musical cues that fit the theme of the piece or the behaviour of the underlying climate data.</p>
      <p id="d1e1017">To create a video, we produced an image for each time step in each piece.
These figures show the data once they have been converted and binned into musical notes using units of the original data. A still image from each video is shown in Fig. <xref ref-type="fig" rid="Ch1.F4"/>. The FFmpeg video editing software <xref ref-type="bibr" rid="bib1.bibx12" id="paren.25"/> was used to convert the set of images into a video and to add the MP3 as the soundtrack.</p>
      <p id="d1e1025">The finished videos were uploaded onto the lead author's YouTube channel<fn id="Ch1.Footn1"><p id="d1e1028">See <uri>https://www.youtube.com/c/LeedeMora</uri>, last access: 17 August 2020.</p></fn> <xref ref-type="bibr" rid="bib1.bibx9" id="paren.26"/>.</p>

      <?xmltex \floatpos{t}?><fig id="Ch1.F4" specific-use="star"><?xmltex \currentcnt{4}?><label>Figure 4</label><caption><p id="d1e1040">The final frame of each of the six videos. The frames of the videos are shown in the order that they were published. The videos <bold>(1)</bold>, <bold>(3)</bold>, <bold>(5)</bold> and <bold>(6)</bold> use a consistent <inline-formula><mml:math id="M14" display="inline"><mml:mi>x</mml:mi></mml:math></inline-formula> axis for the duration of the video, but videos <bold>(2)</bold> and <bold>(4)</bold> have rolling <inline-formula><mml:math id="M15" display="inline"><mml:mi>x</mml:mi></mml:math></inline-formula> axes that change over the course of the video. This means that panels <bold>(2)</bold> and <bold>(4)</bold> show only a small part of the time range. Panel <bold>(5)</bold> includes two vertical lines showing the jumps in the spin-up piece. Panel <bold>(6)</bold> shows a single vertical line for the crossover between the historical and future scenarios.</p></caption>
        <?xmltex \igopts{width=483.69685pt}?><graphic xlink:href="https://gc.copernicus.org/articles/3/263/2020/gc-3-263-2020-f04.png"/>

      </fig>

</sec>
<sec id="Ch1.S4">
  <label>4</label><title>Works</title>
      <p id="d1e1103">Six pieces were composed, generated and published using the methods described here. These pieces and their web addresses are below. Note that each of these videos' last access before this paper was published was 17 August 2020.
<list list-type="order"><list-item>
      <p id="d1e1108">“Earth System Allegro”; <uri>https://www.youtube.com/watch?v=RxBhLNPH8ls</uri></p></list-item><list-item>
      <p id="d1e1114">“Pre-industrial Vivace”; <uri>https://www.youtube.com/watch?v=Hnkvkx4BMk4</uri></p></list-item><list-item>
      <p id="d1e1120">“Ocean Acidification in E minor”; <uri>https://www.youtube.com/watch?v=FPeSAA38MjI</uri></p></list-item><list-item>
      <p id="d1e1126">“Sea Surface Temperature Aria”; <uri>https://www.youtube.com/watch?v=SYEncjETkZA</uri></p></list-item><list-item>
      <p id="d1e1132">“Giant Steps Spin Up”; <uri>https://www.youtube.com/watch?v=fSK6ayp4i4w</uri></p></list-item><list-item>
      <p id="d1e1138">“Seven Levels of Climate Change”; <uri>https://www.youtube.com/watch?v=2YE9uHBE5OI</uri></p></list-item></list></p>
      <?pagebreak page270?><p id="d1e1143">The main goals of the work were to generate music using climate model data and to use music to illustrate some standard practices in Earth system modelling that might not be widely known outside our community. Beyond these broader goals, each piece had its own unique goal; for example, to demonstrate the principles of sonification using UKESM1 data in the “Earth System Allegro”. The “Pre-industrial Vivace” introduces the concept of a PI control simulation and highlights how an emotional connection can be made between the model output and the sonification of the data. The goal of the “Sea Surface Temperature Aria” is to demonstrate the range of behaviours of the future climate projections. “Ocean Acidification in E minor” aims to show the impact of rising atmospheric <inline-formula><mml:math id="M16" display="inline"><mml:mrow class="chem"><mml:msub><mml:mi mathvariant="normal">CO</mml:mi><mml:mn mathvariant="normal">2</mml:mn></mml:msub></mml:mrow></mml:math></inline-formula> on ocean acidification and also to illustrate how historical runs are branched from the PI control. The “Giant Steps Spin Up” shows the process of spinning up the marine component of the UKESM1, and finally, the “Seven Levels of Climate Change” aims to use the musical principles of jazz harmonisation to distinguish the full set of UKESM1's future scenario simulations.</p>
      <p id="d1e1157">These six pieces are summarised in Fig. <xref ref-type="fig" rid="Ch1.F4"/> and Table <xref ref-type="table" rid="Ch1.T1"/>. Figure <xref ref-type="fig" rid="Ch1.F4"/> shows the final frame of each of the pieces, and Table <xref ref-type="table" rid="Ch1.T1"/> shows the summary of each of the videos, including the publication date and duration, and lists the experiments and data sets used to generate the piece.</p>

<?xmltex \floatpos{p}?><table-wrap id="Ch1.T1" specific-use="star" orientation="landscape"><?xmltex \currentcnt{1}?><label>Table 1</label><caption><p id="d1e1172">The video publication details, including the publication date, the duration, the Coupled Model Intercomparison Project (CMIP) experiment names and the data sets used. Note: DIC – dissolved inorganic carbon; PI control – pre-industrial control; SSP – shared socioeconomic pathway; SST – sea surface temperature. Note that each of these videos' last access before this paper was published was 17 August 2020.</p></caption><oasis:table frame="topbot"><oasis:tgroup cols="5">
     <oasis:colspec colnum="1" colname="col1" align="justify" colwidth="4cm"/>
     <oasis:colspec colnum="2" colname="col2" align="left"/>
     <oasis:colspec colnum="3" colname="col3" align="right"/>
     <oasis:colspec colnum="4" colname="col4" align="justify" colwidth="5cm"/>
     <oasis:colspec colnum="5" colname="col5" align="justify" colwidth="7.5cm"/>
     <oasis:thead>
       <oasis:row>
         <oasis:entry colname="col1">Video title</oasis:entry>
         <oasis:entry colname="col2">Publication date</oasis:entry>
         <oasis:entry colname="col3">Duration</oasis:entry>
         <oasis:entry colname="col4">Experiments</oasis:entry>
         <oasis:entry colname="col5">Data sets</oasis:entry>
       </oasis:row>
       <oasis:row rowsep="1">
         <oasis:entry colname="col1"/>
         <oasis:entry colname="col2">(dd-mm-yyyy)</oasis:entry>
         <oasis:entry colname="col3">(mm:ss)</oasis:entry>
         <oasis:entry colname="col4"/>
         <oasis:entry colname="col5"/>
       </oasis:row>
     </oasis:thead>
     <oasis:tbody>
       <oasis:row>
         <oasis:entry colname="col1">“Earth System Allegro”</oasis:entry>
         <oasis:entry colname="col2">21-08-2019</oasis:entry>
         <oasis:entry colname="col3">01:02</oasis:entry>
         <oasis:entry colname="col4">Historical, SSP1 2.5</oasis:entry>
         <oasis:entry colname="col5">Drake Passage current, total air–sea flux of <inline-formula><mml:math id="M17" display="inline"><mml:mrow class="chem"><mml:msub><mml:mi mathvariant="normal">CO</mml:mi><mml:mn mathvariant="normal">2</mml:mn></mml:msub></mml:mrow></mml:math></inline-formula>,</oasis:entry>
       </oasis:row>
       <oasis:row rowsep="1">
         <oasis:entry colname="col1">(<uri>https://www.youtube.com/watch?v=RxBhLNPH8ls</uri>)</oasis:entry>
         <oasis:entry colname="col2"/>
         <oasis:entry colname="col3"/>
         <oasis:entry colname="col4"/>
         <oasis:entry colname="col5">Southern Hemisphere ice extent and Southern Ocean SST</oasis:entry>
       </oasis:row>
       <oasis:row>
         <oasis:entry colname="col1">“Pre-industrial Vivace”</oasis:entry>
         <oasis:entry colname="col2">21-08-2019</oasis:entry>
         <oasis:entry colname="col3">02:27</oasis:entry>
         <oasis:entry colname="col4">PI control</oasis:entry>
         <oasis:entry colname="col5">Total primary production, global mean sea surface,</oasis:entry>
       </oasis:row>
       <oasis:row rowsep="1">
         <oasis:entry colname="col1">(<uri>https://www.youtube.com/watch?v=Hnkvkx4BMk4</uri>)</oasis:entry>
         <oasis:entry colname="col2"/>
         <oasis:entry colname="col3"/>
         <oasis:entry colname="col4"/>
         <oasis:entry colname="col5">chlorophyll, SST, SSS, total ice extent</oasis:entry>
       </oasis:row>
       <oasis:row>
         <oasis:entry colname="col1">“Ocean Acidification <?xmltex \hack{\hfill\break}?>in E minor”</oasis:entry>
         <oasis:entry colname="col2">22-08-2019</oasis:entry>
         <oasis:entry colname="col3">01:56</oasis:entry>
         <oasis:entry colname="col4">PI control, historical</oasis:entry>
         <oasis:entry colname="col5">Global mean surface DIC</oasis:entry>
       </oasis:row>
       <oasis:row rowsep="1">
         <oasis:entry colname="col1">(<uri>https://www.youtube.com/watch?v=FPeSAA38MjI</uri>)</oasis:entry>
         <oasis:entry colname="col2"/>
         <oasis:entry colname="col3"/>
         <oasis:entry colname="col4"/>
         <oasis:entry colname="col5">Global mean surface mean pH</oasis:entry>
       </oasis:row>
       <oasis:row>
         <oasis:entry colname="col1">“Sea Surface <?xmltex \hack{\hfill\break}?>Temperature Aria”</oasis:entry>
         <oasis:entry colname="col2">02-09-2019</oasis:entry>
         <oasis:entry colname="col3">01:17</oasis:entry>
         <oasis:entry colname="col4">PI control, historical,<?xmltex \hack{\hfill\break}?>SSP1 1.9, SSP5 3.4 OS, SSP5 8.5</oasis:entry>
         <oasis:entry colname="col5">Global mean SST</oasis:entry>
       </oasis:row>
       <oasis:row rowsep="1">
         <oasis:entry colname="col1">(<uri>https://www.youtube.com/watch?v=SYEncjETkZA</uri>)</oasis:entry>
         <oasis:entry colname="col2"/>
         <oasis:entry colname="col3"/>
         <oasis:entry colname="col4"/>
         <oasis:entry colname="col5"/>
       </oasis:row>
       <oasis:row rowsep="1">
         <oasis:entry colname="col1">“Giant Steps Spin Up”<?xmltex \hack{\hfill\break}?>(<uri>https://www.youtube.com/watch?v=fSK6ayp4i4w</uri>)</oasis:entry>
         <oasis:entry colname="col2">13-09-2019</oasis:entry>
         <oasis:entry colname="col3">02:52</oasis:entry>
         <oasis:entry colname="col4">Spin up</oasis:entry>
          <oasis:entry colname="col5">Atlantic meridional overturning current, Arctic ice extent, Arctic mean air–sea flux of <inline-formula><mml:math id="M18" display="inline"><mml:mrow class="chem"><mml:msub><mml:mi mathvariant="normal">CO</mml:mi><mml:mn mathvariant="normal">2</mml:mn></mml:msub></mml:mrow></mml:math></inline-formula>, volume-weighted mean temperature of the Arctic Ocean, global surface mean DIC, mean surface chlorophyll in the Arctic</oasis:entry>
       </oasis:row>
       <oasis:row>
         <oasis:entry colname="col1">“Seven Levels of<?xmltex \hack{\hfill\break}?>Climate Change”<?xmltex \hack{\hfill\break}?>(<uri>https://www.youtube.com/watch?v=2YE9uHBE5OI</uri>)</oasis:entry>
         <oasis:entry colname="col2">14-10-2019</oasis:entry>
         <oasis:entry colname="col3">02:55</oasis:entry>
         <oasis:entry colname="col4">PI control, historical,  SSP1 1.9,<?xmltex \hack{\hfill\break}?>SSP1 2.6, SSP4 3.4, SSP5 3.4 –<?xmltex \hack{\hfill\break}?>overshoot, SSP2 4.5,<?xmltex \hack{\hfill\break}?>SSP3 7.0, SSP5 8.5</oasis:entry>
         <oasis:entry colname="col5">Global mean SST, pH, Drake Passage current, global mean surface chlorophyll, global total air–sea flux of <inline-formula><mml:math id="M19" display="inline"><mml:mrow class="chem"><mml:msub><mml:mi mathvariant="normal">CO</mml:mi><mml:mn mathvariant="normal">2</mml:mn></mml:msub></mml:mrow></mml:math></inline-formula>, global total ice extent</oasis:entry>
       </oasis:row>
     </oasis:tbody>
   </oasis:tgroup></oasis:table></table-wrap>

<sec id="Ch1.S4.SS1">
  <label>4.1</label><title>“Earth System Allegro”</title>
      <p id="d1e1474">The “Earth System Allegro” is a relatively fast-paced piece in C major, showing
some important metrics of the Southern Ocean in the recent past
and projected into the future with the shared socioeconomic pathway (SSP) scenario, SSP1 1.9.
The SSP1 1.9 projection is the future scenario in which the anthropogenic impact on the climate is the smallest.
The C major scale is composed of only natural notes (no sharp or flat notes),
making it one of the first chords that people encounter when learning music.
In addition, major chords and scales like C major typically sound happy. Christian Schubart's “Ideen zu einer Aesthetik der Tonkunst” (1806)
describes C major as “Completely pure. Its character is: innocence, simplicity, naivety, children's talk” (Schubart and DuBois, 1983).
Through choosing C major and an upbeat tempo and data from the best possible climate scenario (SSP1 1.9), we aimed to start the project with a piece with a sense of optimism about the future climate and to introduce the principles of musification of the UKESM1 time series data.</p>
      <p id="d1e1477">The Drake Passage current, shown in panel (1) of Fig. <xref ref-type="fig" rid="Ch1.F4"/>,
is a measure of the strongest current in the ocean, namely the Antarctic Circumpolar current. This is the current that flows eastwards around Antarctica. The second data set, shown here in orange, is the global total air to sea flux of <inline-formula><mml:math id="M20" display="inline"><mml:mrow class="chem"><mml:msub><mml:mi mathvariant="normal">CO</mml:mi><mml:mn mathvariant="normal">2</mml:mn></mml:msub></mml:mrow></mml:math></inline-formula>. This field shows the global total atmospheric carbon dioxide that is absorbed into the ocean each year. Even under SSP1 1.9, UKESM1 predicts that this value would rise from around
zero during the pre-industrial period to a maximum of approximately
2 Pg of carbon per year around the year 2030, followed by a return to zero at the end of the century. The third field is the sea ice extent of the Southern Hemisphere, shown in blue. This is the total area of the ocean in the Southern Hemisphere which has more than 15 % ice coverage per grid cell of our model. The fourth field is the Southern Ocean mean surface temperature, shown in green, which rises slightly from approximately 5 <inline-formula><mml:math id="M21" display="inline"><mml:msup><mml:mi/><mml:mo>∘</mml:mo></mml:msup></mml:math></inline-formula>C in the pre-industrial period up to a maximum of 6 <inline-formula><mml:math id="M22" display="inline"><mml:msup><mml:mi/><mml:mo>∘</mml:mo></mml:msup></mml:math></inline-formula>C.
The ranges of each data set are illustrated in Fig. <xref ref-type="fig" rid="Ch1.F2"/>.</p>
      <p id="d1e1513">In this piece, the Drake Passage current is set to the C major scale, but the other three parts modulate between the C major, G major, A minor and F major chords.
These are the first, fifth, sixth and fourth chords in the root of C major. This progression is strikingly popular and may be heard in songs such as “Let it be” by the Beatles, “No woman no cry” by Bob Marley and the Wailers, “With or without you” by U2, “I’m yours” by Jason Mraz and “Africa” by Toto, among many others. By choosing such a common progression, we were aiming to introduce the concept of musification of data using familiar-sounding music and to avoid alienating the audience.
</sec>
<sec id="Ch1.S4.SS2">
  <label>4.2</label><title>“Pre-industrial Vivace”</title>
      <p id="d1e1524">The “Pre-industrial Vivace” is a fast-paced piece in C major, showing various metrics of the behaviour of the global ocean in the PI control run. The PI control run is a long-term simulation of the Earth's climate without the impact of the industrial revolution or any of the subsequent human impact on climate. At the time that the piece was created, there were approximately 1400 simulated years. We use the control run as the starting point for historical simulations but also to compare the difference between human-influenced simulations and simulations of the ocean without any anthropogenic impact.</p>
      <p id="d1e1527">The final frame of the “Pre-industrial Vivace” video is shown in panel (2) of Fig. <xref ref-type="fig" rid="Ch1.F4"/>. The top pane of this video shows the global marine primary production in purple. The primary production is a measure of how much marine phytoplankton is growing. Similarly, the second pane shows the global marine surface chlorophyll concentration in green; this line rises and falls alongside the primary production in most cases. The third and fourth panes show the global mean sea surface temperature and sea surface salinity (SSS) in red and orange. The fifth pane shows the global total ice extent. These five fields are an overview of the behaviour of the pristine natural ocean of our Earth system model. There is no significant drift, and there is no long-term trend in any of these fields. However, there is significant natural variability operating at decadal and millennial scales.</p>
      <p id="d1e1532">As with the “Earth System Allegro”, “Pre-industrial Vivace”
uses the familiar C major scale but adds a slight variation to the chord progression. The first half of the progression is C major, G major, A minor and F major,
but it follows with a common variant of this progression, namely C major, D minor, E minor and F major.
Through using the lively vivace tempo and a familiar chord progression
in a major key, this piece aims to use musification to link the PI control simulation with a sense of happiness and ease.
The lively, fast and jovial tone of the piece should match the pre-industrial
environment, which is free running and uninhibited by anthropogenic pollution.</p>
</sec>
<sec id="Ch1.S4.SS3">
  <label>4.3</label><title>“Sea Surface Temperature Aria”</title>
      <?pagebreak page272?><p id="d1e1543">The “Sea Surface Temperature Aria” demonstrates the change in sea surface temperature in the PI control run, the historical scenario and under three future climate projection scenarios, as shown in panel (3) of Fig. <xref ref-type="fig" rid="Ch1.F4"/>. The first scenario is the “business as usual” scenario (SSP5 8.5; shown in red) in which human carbon emissions continue without mitigation. The second scenario is an “overshoot” scenario, namely an SSP5 3.4-overshoot, in which emissions continue to grow but then drop rapidly in the middle of the 21st century, as shown in orange.
The third scenario is SSP1 1.9, labelled as the “Paris Agreement” scenario and shown in green, in which carbon emissions drop rapidly from the present day. The goal of this piece is to demonstrate the range of differences between some of the SSP scenarios on sea surface temperature.</p>
      <p id="d1e1548">The PI control run and much of the historical scenario data are relatively constant. However, they start to diverge in the 1950s. In the future scenarios, the three projections all behave similarly until the 2030s;
then the SSP1 1.9 scenario branches off and maintains a relatively constant global mean sea surface temperature. The SSP5 3.4 scenario's SST continues to grow until the year 2050, while the SSP5 8.5 scenario's SST grows until the end of the simulation.</p>
      <p id="d1e1551">Musically, this piece is consistently in the scale of A minor harmonic, with no modulating chord progression. The minor harmonic scale is a somewhat artificial scale in that it augments the seventh note of the natural minor scale. The augmented seventh means that there is a minor third
between the sixth and seventh note, making it sound uneasy and sad (at least to the author's ears). An aria is a self-contained piece for one voice, normally within a larger work. In this case, the name “aria” is used to highlight that only one data set, namely the sea surface temperature, participates in the piece. This piece starts relatively low and slow, then grows higher and louder as the future scenarios are added to the piece.
The unchanging minor harmonic chord, slow tempo and pitch range were chosen
to elicit a sense of dread and discord as the piece progresses to the catastrophic SSP5 8.5 scenario at the end of the 21st century.</p>
</sec>
<sec id="Ch1.S4.SS4">
  <label>4.4</label><title>“Ocean acidification in E minor”</title>
      <p id="d1e1562">“Ocean acidification in E<inline-formula><mml:math id="M23" display="inline"><mml:msub><mml:mi/><mml:mtext mathvariant="italic">minor</mml:mtext></mml:msub></mml:math></inline-formula>” demonstrates the standard modelling practice of branching historical simulations from the PI control run and the impact of rising anthropogenic carbon on the ocean carbon cycle. The final frame of this video is shown in panel (4) of Fig. <xref ref-type="fig" rid="Ch1.F4"/>. The top pane shows the global mean dissolved inorganic carbon (DIC) concentration in the surface of the ocean, and the lower pane shows the global mean sea surface pH. In both panes, the PI control run data are shown as a black line, and the coloured lines represent the 15 historical simulations.</p>
      <p id="d1e1576">This piece uses a repeating “12 bar blues” structure in E minor and a relatively fast tempo. This chord progression is an exceptionally common progression, especially in blues, jazz and early rock and roll.
It is composed of four bars of the E minor, two bars of A minor, two bars of E minor, then one bar of B minor, A minor, E minor and B minor. The 12 bar blues can be heard in songs such as “Johnny B. Goode” by Chuck Berry, “Hound dog” by Elvis Presley, “I got you (I feel Good)” by James Brown, “Sweet home Chicago” by Robert Johnson or “Rock and roll” by Led Zeppelin. In the context of Earth system music, the 12 bar pattern with its opening set of four bars, then two sets of two bars and ending with four sets of one bar between key changes drives the song forward before starting again slowly. This behaviour is thematically similar to the behaviour of the ocean acidification in UKESM1 historical simulation, in which the bulk of the acidification occurs at the end of each historical period.</p>
      <p id="d1e1579">This video highlights that the marine carbon system has been heavily impacted over the historical period. In the PI control runs, both the pH and the DIC are very stable. However, in all historical simulations with rising atmospheric <inline-formula><mml:math id="M24" display="inline"><mml:mrow class="chem"><mml:msub><mml:mi mathvariant="normal">CO</mml:mi><mml:mn mathvariant="normal">2</mml:mn></mml:msub></mml:mrow></mml:math></inline-formula>, the DIC concentration rises and the pH falls.
The process of ocean acidification is relatively simple and well understood <xref ref-type="bibr" rid="bib1.bibx2 bib1.bibx23" id="paren.27"/>. The atmospheric <inline-formula><mml:math id="M25" display="inline"><mml:mrow class="chem"><mml:msub><mml:mi mathvariant="normal">CO</mml:mi><mml:mn mathvariant="normal">2</mml:mn></mml:msub></mml:mrow></mml:math></inline-formula> is absorbed from the air into the ocean surface, which releases hydrogen ions into the ocean,
making the ocean more acidic. The concentration of DIC in the sea surface
is closely linked with the concentration of atmospheric <inline-formula><mml:math id="M26" display="inline"><mml:mrow class="chem"><mml:msub><mml:mi mathvariant="normal">CO</mml:mi><mml:mn mathvariant="normal">2</mml:mn></mml:msub></mml:mrow></mml:math></inline-formula>,
and it rises over the historic period. This behaviour was observed in every single UKESM1 historical simulation. This video also illustrates an important part of the methodology used to produce models of the climate that may not be widely known outside our community. When we produce models of the Earth system, we use a range of points of the PI control as the initial conditions for the historical simulations. All the historical simulations have slightly different starting points, and evolve from these different initial conditions, which give us more confidence that the results of our projections are due to changes since the pre-industrial period instead of simply a consequence of the initial conditions. In this figure, the historical simulations are shown where they branch from the PI control run instead of using the “real” time as the <inline-formula><mml:math id="M27" display="inline"><mml:mi>x</mml:mi></mml:math></inline-formula> axis.</p>
</sec>
<sec id="Ch1.S4.SS5">
  <label>4.5</label><title>“Giant Steps Spin Up”</title>
      <p id="d1e1633">This piece combines the spin up of the United Kingdom Earth System
Model with the chord progression of John Coltrane's “Giant steps” <xref ref-type="bibr" rid="bib1.bibx4" id="paren.28"/>. The spin up is the process of running the model from a set of initial conditions to a near-steady-state climate. When a model reaches a steady state, this means that there is no significant trend or drift in the mean behaviour of several key metrics. For instance, as part of the Coupled Climate Carbon Cycle Model Intercomparison Project (C4MIP) protocol, <xref ref-type="bibr" rid="bib1.bibx17" id="text.29"/> suggest a drift criterion of less than 10 Pg of carbon per century in the absolute value of the flux of <inline-formula><mml:math id="M28" display="inline"><mml:mrow class="chem"><mml:msub><mml:mi mathvariant="normal">CO</mml:mi><mml:mn mathvariant="normal">2</mml:mn></mml:msub></mml:mrow></mml:math></inline-formula> from the atmosphere to the ocean. In practical terms, the ocean model is considered to be spun up when the long-term average of the air–sea flux of carbon is consistently between <inline-formula><mml:math id="M29" display="inline"><mml:mo>-</mml:mo></mml:math></inline-formula>0.1 and 0.1 Pg of carbon per year.</p>
      <?pagebreak page273?><p id="d1e1660">The spin up is a crucial part of model development. Without spinning up, the historical ocean model would still be equilibrating with the atmosphere.
It would be much more difficult to separate the trends in the historical and future scenarios from the underlying trend of a model still trying to equilibrate. Note that while a steady-state model does not have any significant long-term trends or drifts, it can still have short-term variability. This short-term variability can be seen in the pre-industrial simulation in the “Pre-industrial Vivace” piece. It can take a model thousands of years of simulations for the ocean to reach a steady state.
In our case, the spin up ran for approximately 5000 simulated years before the spun up drift criterion was met <xref ref-type="bibr" rid="bib1.bibx42" id="paren.30"/>.</p>
      <p id="d1e1666">The UKESM1 spin up was composed of several phases in succession. The first stage was a fully coupled run using an early version of UKESM1. Then, an ocean-only run was started using a 30 year repeating atmospheric forcing data set. The beginning of this part of the run is considered to be the beginning of the spin up, and the time axis is set to zero at the start of this run.
This is because the early version of UKESM1 did not include a carbon system in the ocean. After about 1900 years of simulating the ocean with the repeating atmospheric forcing data set, we had found that some changes were needed to the physical model. At this point, we initialised a new simulation from the final year of the previous stage and changed the atmospheric forcing. This second ocean-only simulation ran until the year 4900.
At this point, we finished the spin up with a few hundred years of fully coupled UKESM1 with ocean, land, sea ice and atmosphere models.
Due to the slow and repetitive nature of the ocean-only spin up, several centuries of data were omitted. These are marked as grey vertical lines in the video and panel (5) of Fig. <xref ref-type="fig" rid="Ch1.F4"/>.</p>
      <p id="d1e1671">The piece is composed of several important metrics of the spin up in the ocean, such as the Atlantic meridional overturning current (purple),
Arctic ocean total ice extent (blue), the global air–sea flux of <inline-formula><mml:math id="M30" display="inline"><mml:mrow class="chem"><mml:msub><mml:mi mathvariant="normal">CO</mml:mi><mml:mn mathvariant="normal">2</mml:mn></mml:msub></mml:mrow></mml:math></inline-formula> (red), the volume-weighted mean temperature of the Arctic ocean (orange),
the surface mean DIC in the Arctic Ocean (pink) and the surface mean chlorophyll concentration in the Arctic ocean (green).</p>
      <p id="d1e1686">The music is based on the chord progression from the jazz standard, John Coltrane&#8217;s &#8220;Giant steps&#8221;, although the musical progression was slowed to one chord change per four beats instead of a change at every beat. This change occurred by accident, but we found that the full-speed version
sounded very chaotic, so the slowed version was published instead.
This piece was chosen because it has a certain notoriety due to the difficulty for musicians to improvise over the rapid chord changes.
In addition, “Giant steps” was the first new composition to feature Coltrane changes. Coltrane changes are a complex cyclical harmonic progression which form a musical framework for jazz improvisation.
We hoped that the complexity of the Earth system model is reflected in the complexity of the harmonic structure of the piece. The cyclical relationship of the Coltrane changes also reflects the 30 year repeating atmospheric forcing data set used to spin up the ocean model.</p>
</sec>
<sec id="Ch1.S4.SS6">
  <label>4.6</label><title>“Seven Levels of Climate Change”</title>
      <p id="d1e1698">This piece is based on a YouTube video by Adam Neely, called “The 7 levels of jazz harmony” <xref ref-type="bibr" rid="bib1.bibx22" id="paren.31"/>. In that video, Neely demonstrates seven increasingly complex levels of jazz harmony by re-harmonising a line of the chorus of Lizzo's song “Juice”. We have repeated Neely's re-harmonisation of “Juice” here, such that each successive level's note choice is informed by Earth system simulations, with increasing levels of emissions and stronger anthropogenic climate change.</p>
      <p id="d1e1704">At the time of writing, UKESM1 had produced simulations of seven future scenarios. The seven scenarios of climate change and their associated jazz harmony are as follows:
<list list-type="bullet"><list-item>
      <p id="d1e1709">Level 0: PI control – original harmony</p></list-item><list-item>
      <p id="d1e1713">Level 1: SSP1 1.9 – four note chords</p></list-item><list-item>
      <p id="d1e1717">Level 2: SSP1 2.6 – tritone substitution</p></list-item><list-item>
      <p id="d1e1721">Level 3: SSP4 3.4 – tertiary harmony extension</p></list-item><list-item>
      <p id="d1e1725">Level 4: SSP5 3.4 (overshoot) – pedal point</p></list-item><list-item>
      <p id="d1e1729">Level 5: SSP2 4.5 – non-functional harmony</p></list-item><list-item>
      <p id="d1e1733">Level 6: SSP3 7.0 – liberated dissonance</p></list-item><list-item>
      <p id="d1e1737">Level 7: SSP5 8.5 – fully chromatic</p></list-item></list></p>
      <p id="d1e1740">Note that we were not able to reproduce Neely&#8217;s seventh level, namely intonalism or xenharmony. In this level, the intonation of the notes is changed depending on the underlying melody. Unfortunately, the MIDITime Python interface for MIDI has not yet reached such a level of sophistication.
Instead, we simply allow all possible values of the 12-note chromatic scale.</p>
      <p id="d1e1743">The data sets used in this piece are a set of global-scale metrics that
show the bulk properties of the model under the future climate change scenarios. They include the global mean SST (red), the global mean surface pH (purple), the Drake Passage current (yellow), the global mean surface chlorophyll concentration (green), the global total air&#8211;sea flux of <inline-formula><mml:math id="M31" display="inline"><mml:mrow class="chem"><mml:msub><mml:mi mathvariant="normal">CO</mml:mi><mml:mn mathvariant="normal">2</mml:mn></mml:msub></mml:mrow></mml:math></inline-formula> (gold) and the global total ice extent (blue). As the piece progresses through the seven levels, the anthropogenic climate change in the model becomes more extreme, matching the increasingly esoteric harmonies of the music.</p>
</sec>
</sec>
<sec id="Ch1.S5">
  <label>5</label><title>Limitations and potential extensions</title>
      <p id="d1e1767">We have successfully demonstrated that it is possible to generate music
using data from the UK's Earth System Model. We have also shown that we can illustrate some standard<?pagebreak page274?> practices in Earth system modelling using music.
Within the  framework of this pilot study, we must also raise some limitations and suggest some possible extensions for future versions of this work.</p>
      <p id="d1e1770">A significant omission from this study is the measurement of the impact,
the reach or the engagement of these works. We did not test whether the audience was composed of laypeople or experts. We did not investigate whether the audience learnt anything about Earth system modelling through this series of videos. We did not monitor the audience reactions or interpretations of the music. Future extensions of this project should include a survey of the audience to investigate their backgrounds and demographics, what they learnt about Earth system models, and their overall impressions of the pieces. This could take the form of an online survey associated with each video or a discussion with the audience at a live performance.</p>
      <p id="d1e1773">In addition, in this work, we make no effort to monitor or describe the reach of the YouTube videos, track comments, subscriptions or the source of the views. While some tools are available for monitoring the number of videos within YouTube&#8217;s content creator toolkit, YouTube Studio <xref ref-type="bibr" rid="bib1.bibx14" id="paren.32"/>, a preliminary investigation found that it was not possible to use these tools alone to create a sufficiently detailed analysis of the impact, reach or dissemination of these music creation methods.
YouTube Studio currently includes some demographic details, including gender, country of origin, viewership age, and traffic source, but it is not sufficient for an audience survey. This toolkit was built to help content creators monitor and build their audience and to monetise videos using advertisements. It is not fit for the purpose of scientific engagement monitoring. For instance, it was not possible to use YouTube Studio to determine the expertise of the audience, their thoughts on climate change,
whether they read the video description section or whether they understood the description. Some of these features could be added to YouTube by Google,
but many of them would require the audience survey described above.</p>
      <p id="d1e1779">Our videos only include the music and a visualisation of the data;
they do not include any description about how the music was generated or the
Earth system modelling methods used to create the underlying data.
The explanations of the science and musification methodologies
are given in a description below the video. Furthermore, viewers must expand this box by clicking the &#8220;show more&#8221; button. Using the tools provided in YouTube Studio, it is not currently possible to determine whether the viewers
have expanded, read or understood the description section. When we have shown these videos to audiences at scientific meetings and conferences,
it has always been associated with a brief explanation of the methods.
In future, this explanatory preface to the work could be included in the video itself, or as a separate video, in addition to the text below the video in the description section. This would likely increase the audience's understanding of our music-generation process.</p>
      <p id="d1e1783">If additional pieces were made, there are several potential ways that the methodology used to create them could be improved relative to the methods used to create the initial set of videos. In future versions of this work, it should be possible to use the ESMValTool <xref ref-type="bibr" rid="bib1.bibx27" id="paren.33"/>
to produce the time series data instead of BGC-val. This would make the production of the time series more easily repeatable but would also make it easier for pieces to be composed using data available in the CMIP5 and CMIP6 coupled model intercomparison projects. This broadens the scope of the data by allowing other models, other model domains, including the atmosphere and the land surface, and even observational data sets. For instance, we could make a multi-model intercomparison piece or a piece based on the atmospheric, terrestrial and ocean components of the same model. In addition, using ESMValTool would also make it more straightforward to distribute the source code that was used to make these pieces.</p>
      <p id="d1e1789">In the reflections on auditory graphics, <xref ref-type="bibr" rid="bib1.bibx13" id="text.34"/> lists several “Things that work” and “Approaches that do not work”. From the list of things that work, we included four of the five methods that worked: pitch coding of numeric data, the exploitation of temporal resolution of human audition, manipulating loudness changes and using time as time. We were not able to include the selection of distinct timbres to minimise stream confusion. From the list of approaches that do not work, we successfully avoided several of the pitfalls, notably pitch mapping to continuous variables and using loudness changes to represent an important continuous variable. However, we did include one of the approaches that Flowers did not recommend: we simultaneously plotted several variables with similar pitches and timbres. However, it is worth noting that maximising the clarity of the sonification is the goal of <xref ref-type="bibr" rid="bib1.bibx13" id="text.35"/>, but our focus was to produce
and disseminate some relatively listenable pieces of music using UKESM1 data.</p>
      <p id="d1e1798">The two suggestions by <xref ref-type="bibr" rid="bib1.bibx13" id="text.36"/> that we failed to address
were both related to using the same timbre digital piano synthesiser for all data. Due to the technical limitations of using TiMidity<inline-formula><mml:math id="M32" display="inline"><mml:mrow><mml:mo>+</mml:mo><mml:mo>+</mml:mo></mml:mrow></mml:math></inline-formula>, we were not able to vary the instruments used, and thus there was very little variability in terms of the timbres. These pieces were all performed by the same instrument, a solo piano, which limits the musical diversity of the set of pieces. In addition, each data set in a given piece was performed by the same instrument, making it difficult to distinguish the different data sets being performed simultaneously. Further extensions of this work could use a fully featured digital audio workstation to access a range of digital instruments beyond the digital piano, such as a string quartet, a horn and woodwind section, a full digital orchestra, electric guitar and bass, percussive instruments, or electronic synthesised instruments. This would comply with the suggestions listed in <xref ref-type="bibr" rid="bib1.bibx13" id="text.37"/>, allowing the individual data sets to stand out musically from each other in an individual piece, but would also lead to a much more diverse set of musical pieces.</p>
      <?pagebreak page275?><p id="d1e1817">From a musical perspective, there are many ways to improve the performances of the pieces for future versions of this work. As raised in the comments from social media, a human pianist would be able to add a warmth to the
performance that is beyond the abilities of MIDI interpreters. A recording of a human performance could also add the hidden artefacts of a live recording, such as room noise, stereo effects and natural reverb. On the other hand, due to the nature of the process used to generate these pieces, it may not be possible for a single human to perform several of the pieces due to the speed, complexity, number of simultaneous notes or the range of these pieces. Alternatively, it may be possible to “humanise” the MIDI by making subtle changes to the timing and velocities of the MIDI notes.
This is a recording technique that can take a synthesised, perfectly timed beat and make it sound like it is being played by a human. It does this by moving the individual notes slightly before or after the beat, and adding subtle variations in the velocity <xref ref-type="bibr" rid="bib1.bibx38" id="paren.38"/>. Also, TiMidity<inline-formula><mml:math id="M33" display="inline"><mml:mrow><mml:mo>+</mml:mo><mml:mo>+</mml:mo></mml:mrow></mml:math></inline-formula> uses the same piano sample for each pitch. This means that when two tracks of a piece play the same pitch at the same time, exactly the same sample is played twice simultaneously. These two identical sample sound waves are added constructively, and the note jumps out much louder than it would if a human played the part. A fully featured digital piano or a human performance would remove these loud jumps but also be able to add more nuance and warmth to the performance. Finally, the published pieces had no mastering or post-production. Even a basic mastering session by a professional sound engineer would likely improve the overall quality of the sound of these pieces.</p>
      <p id="d1e1833">In terms of the selection of chord progressions, tempo and rhythms,
it may be possible to target specific audiences using music based
on popular artists or genres. For instance, the reach of a piece might be increased by responding to viral videos or by basing a work on a popular song.</p>
      <p id="d1e1836">In these works, we have focused on reproducing Western music, both traditional and modern, in order to connect each piece with the associated emotional musical cues. Alternatively, there is a significant diversity in traditional and modern styles of music from other regions around the world; a much wider range of rhythms, timbres, styles and emotional cues could be exploited in future extensions of this work.</p>
      <p id="d1e1840">With regards to the visual aspect of these videos, it should be straightforward to improve the quality of the graphics used. The current videos only show a simple scalar field as it develops over time. They could be improved by adding animated global maps of the model, interviews or live performances to the video. It may also be a positive addition to preface the videos with a brief explanation of the project and the methods deployed.
On the technical side, there may also be some visual glitches and artefacts which arise due to YouTube's compression or streaming algorithms. A different streaming service or alternative video making software might help remove these glitches.</p>
      <p id="d1e1843">YouTube videos are typically shown in the suggestions queue with a thumbnail image and the video title. The thumbnail is the graphic placeholder that shows the video while it is not playing on YouTube as a suggested video or in Facebook or Twitter feeds. The thumbnail is how viewers first encounter the video, and it is a crucial part of attracting an audience. There are lots of guides helping one to create better thumbnails <xref ref-type="bibr" rid="bib1.bibx18 bib1.bibx37 bib1.bibx21" id="paren.39"/>. Future works should attempt to optimise the video thumbnail to attract a wider audience.</p>
      <p id="d1e1849">While we did not investigate the reach or dissemination of these pieces in this work, if the goal of future projects was to increase the online audience size then it might be possible to reach a wider audience using a press release, a public screening of the videos, a scheduled publication date
or through a collaboration with other musicians or YouTube content creators.
It may also be possible to host a live concert, make a live recording or broadcast a YouTube live stream. It is not fully understood how a video can go viral, but it has been shown that view counts can rise exponentially when a single person or organisation with a large audience shares a video <xref ref-type="bibr" rid="bib1.bibx40 bib1.bibx16" id="paren.40"/>. Improvements to the music, the video, the description and the thumbnail make it more likely for such an influencer with a large audience to like, share or retweet a piece, which could result in a significant increase in the audience size and view count. The videos in this work were posted online in an ad hoc fashion as soon as they were finished.
To maximise the number of views, experts have recommended consistent, scheduled in advance, weekly videos, and it has been advised to publish them late in the week in the afternoons <xref ref-type="bibr" rid="bib1.bibx6 bib1.bibx35" id="paren.41"/>.
Finally, it should be possible to increase the reach of this work through
paid advertising on YouTube and other social media platforms. This would place the videos higher in the suggested video rankings and on the discovery queues.</p>
</sec>
<sec id="Ch1.S6" sec-type="conclusions">
  <label>6</label><title>Conclusions</title>
      <p id="d1e1866">In this work, we took data from the first United Kingdom Earth System Model and converted it into six musical pieces and videos. These pieces covered
the core principles of climate modelling or ocean modelling, namely PI control runs, the spin-up process, multiple future scenarios, the Drake Passage current, the air–sea flux of <inline-formula><mml:math id="M34" display="inline"><mml:mrow class="chem"><mml:msub><mml:mi mathvariant="normal">CO</mml:mi><mml:mn mathvariant="normal">2</mml:mn></mml:msub></mml:mrow></mml:math></inline-formula> and the Atlantic meridional overturning circulation. While limited to a single instrument, namely the synthesised piano, they included a range of musical styles, including classical, jazz, blues and contemporary styles.</p>
      <p id="d1e1880">While the wider public are likely to be familiar with climate change,
they are less likely to be familiar with our community&#8217;s methods. In fact, many standard tools in the arsenal of climate modellers may not be widely appreciated outside our small community, even within the scientific community. These six musical pieces open the door on a new, exciting<?pagebreak page276?> and fun approach to how we engage with fellow scientists and the wider public.</p>
      <p id="d1e1883">We have also discussed some ways of improving future iterations of this pilot study. Future works could be performed to a live audience, we could collaborate with musicians and the viewership would likely be increased
with improved video graphics, thumbnails, live performances, video diversity, and more frequent upload rates. The scientific content of the videos could be expanded by accessing new data sets, other parts of the UKESM1 Earth System Model, other CMIP models or observational data sets. The quality of the music could be improved by including additional instruments and musical genres,
and by making live recordings instead of MIDI performances. The knowledge transfer aspect of the project could be improved upon by appending explanations of the science to the video and by surveying the audience to identify the impact of these works.</p>
      <p id="d1e1886">Finally, the authors would like to encourage other scientists to think about how their work may be sonified. You may have beautiful and unique music hidden within your data; the methods described in this work would allow it to be made manifest.</p>
</sec>

      
      </body>
    <back><notes notes-type="dataavailability"><title>Data availability</title>

      <p id="d1e1893">The sheet music for the four pieces and the MIDI files for all six pieces are available alongside this publication in the Supplement. Note that it was not possible to produce sheet music for “Ocean Acidification in E minor” or “Seven Levels of Climate Change” as there are too many MIDI tracks in these pieces. The UKESM1 model data used in this work is available via the World Climate Research Programme (WCRP) CMIP6 data interface <uri>https://esgf-node.llnl.gov/projects/cmip6/</uri> (last access: 17 August 2020, WCRP, 2020).</p>
  </notes><notes notes-type="videosupplement"><title>Video supplement</title>

      <p id="d1e1902">These videos are published online on the YouTube channel:
<uri>https://www.youtube.com/c/LeedeMora</uri>, last access: 17 August 2020 <xref ref-type="bibr" rid="bib1.bibx9" id="paren.42"/>.</p>

      <p id="d1e1911">The videos described here are distributed under the standard YouTube Licence.
The chord progressions from John Coltrane's “Giant steps”, Lizzo's “Juice” and Adam Neely's re-harmonisation of “Juice” were reproduced
under fair use without explicit permission from the copyright owners.</p>
  </notes><app-group>
        <supplementary-material position="anchor"><p id="d1e1914">The supplement related to this article is available online at: <inline-supplementary-material xlink:href="https://doi.org/10.5194/gc-3-263-2020-supplement" xlink:title="zip">https://doi.org/10.5194/gc-3-263-2020-supplement</inline-supplementary-material>.</p></supplementary-material>
        </app-group><notes notes-type="authorcontribution"><title>Author contributions</title>

      <p id="d1e1923">LdM used BGC-val to produce the model time series data, sonified the BGC-val data, published the videos and prepared the text. AAS, RSS and JW provided feedback and held early discussions on the music. ESM, AY, JP and TK helped develop the core time series data sets in UKESM1. RJP shared the finished videos and provided audience feedback. JCB and CGJ led the PML modelling group and UKESM1 projects, respectively, and both provided crucial feedback and support.</p>
  </notes><?xmltex \hack{\newpage}?><notes notes-type="competinginterests"><title>Competing interests</title>

      <p id="d1e1930">The authors declare that they have no conflict of interest.</p>

      <p id="d1e1933">Like most YouTube content creators, Lee de Mora has a financial relationship with YouTube. However, at the time of writing, the channel in which these videos
were posted did not meet YouTube's monetisation requirements (i.e. 1000 subscribers and 4000 h watched).</p>
  </notes><ack><title>Acknowledgements</title><p id="d1e1939">Lee de Mora, Andrew Yool, Julien Palmieri, Robin S. Smith, Till Kuhlbrodt, Robert J. Parker, Jeremy C. Blackford and Colin G. Jones were supported by the  National Environmental Research Council (NERC) National Capability Science Multi-Centre (NCSMC) funding for the UK Earth System Modelling project.  Alistair A. Sellar and Jeremy Walton were supported by the Met Office Hadley Centre Climate Programme funded by BEIS and Defra. The following funding is also acknowledged for the following contributors: Colin G. Jones, Till Kuhlbrodt and  Robin S. Smith (grant no. NE/N017978/1); Robert J. Parker (grant no. NE/N018079/1); and Andrew Yool, Julien Palmieri, Lee de Mora and Jeremy C. Blackford (grant no. NE/N018036/1). Colin G. Jones, Till Kuhlbrodt, Andrew Yool and Julien Palmieri additionally acknowledge the EU Horizon 2020 CRESCENDO Project (grant no. 641816).</p><p id="d1e1941">We acknowledge use of the MONSooN2 system, a collaborative facility supplied under the joint Weather and Climate Research Programme, which is a strategic partnership between the Met Office and the Natural Environment Research Council.</p><p id="d1e1943">The simulation data used in this study are archived at the Met Office and are available for research purposes through the JASMIN platform (<uri>http://www.jasmin.ac.uk</uri>, last access: 17 August 2020) maintained by the Centre for Environmental Data Analysis (CEDA).</p><p id="d1e1948">The authors would like to thank our handling editor at <italic>Geoscience Communication</italic>, Sam Illingworth, and the referees. Their contributions were valuable and resulted in a significantly improved paper.</p><p id="d1e1953">Finally, the authors would also like to thank anyone who took the time to watch a video, leave a comment, use the like button, subscribe to the channel or share these videos.</p></ack><notes notes-type="financialsupport"><title>Financial support</title>

      <p id="d1e1959">This research has been supported by the NERC Environmental Bioinformatics Centre (grant nos. NE/N018036/1 and NE/N018079/1) and the EU Horizon 2020 (grant no. 641816).</p>
  </notes><notes notes-type="reviewstatement"><title>Review statement</title>

      <p id="d1e1965">This paper was edited by Sam Illingworth and reviewed by Solmaz Mohadjer and one anonymous referee.</p>
  </notes><?xmltex \hack{\newpage}?><ref-list>
    <title>References</title>

      <ref id="bib1.bibx1"><label>Borromeo et al.(2016)</label><?label Borromeo2016?><mixed-citation>Borromeo, L., Round, K., and Perera, J.: Climate Symphony, available at:
<uri>https://www.disobedientfilms.com/climate-symphony</uri> (last access: 17 August 2020),
2016.</mixed-citation></ref>
      <ref id="bib1.bibx2"><label>Caldeira and Wickett(2003)</label><?label Caldeira2003?><mixed-citation>Caldeira, K. and Wickett, M. E.: Anthropogenic carbon and ocean pH, Nature,
425, 365–365, <ext-link xlink:href="https://doi.org/10.1038/425365a" ext-link-type="DOI">10.1038/425365a</ext-link>, 2003.</mixed-citation></ref>
      <ref id="bib1.bibx3"><label>Clendinning and Marvin(2016)</label><?label Clendinning2016?><mixed-citation>
Clendinning, J. P. and Marvin, E. W.: The Musician's Guide To Theory And
Analysis, W. W. Norton &amp; Company, 3rd Edn.,
2016.</mixed-citation></ref>
      <ref id="bib1.bibx4"><label>Coltrane(1960)</label><?label Giantsteps?><mixed-citation>
Coltrane, J.: Giant Steps (album), Atlantic Records, Published February 1960.</mixed-citation></ref>
      <ref id="bib1.bibx5"><label>Corey(2016)</label><?label miditime?><mixed-citation>Corey, M.: MIDITime python library for MIDI, available at:
<uri>https://github.com/cirlabs/miditime</uri> (last access: 17 August 2020),
2016.</mixed-citation></ref>
      <ref id="bib1.bibx6"><label>Cox(2017)</label><?label whentopost1?><mixed-citation>Cox, S.: How Often Should You Upload Videos to YouTube to Get More Views, available at:
<ext-link xlink:href="https://www.filmora.io/community-blog/how-often-should-you-upload-to-youtube--consistent-posting-187.html">https://www.filmora.io/community-blog/how-often-should-you-upload-to-youtube--consistent-posting-187.html</ext-link> (last access: 17 August 2020),
2017.</mixed-citation></ref>
      <ref id="bib1.bibx7"><label>Craven and Mcindoe(1999)</label><?label Craven1999?><mixed-citation>
Craven, R. M. and Mcindoe, A. K.: Continuous auditory monitoring – how much
information do we register?, Brit. J. Anaesth., 83, 747–749,
1999.</mixed-citation></ref>
      <ref id="bib1.bibx8"><label>Crawford(2013)</label><?label Crawford2013?><mixed-citation>Crawford, D.: Planetary Bands, Warming World string quartet, Video published by Ensia magazine, available at: <uri>https://vimeo.com/127083533</uri>  (last access: 17 August 2020),
2013.</mixed-citation></ref>
      <ref id="bib1.bibx9"><label>de Mora(2019)</label><?label LeeYoutube?><mixed-citation>de Mora, L.: Lee de Mora's YouTube channel homepage, available at:
<uri>https://www.youtube.com/c/LeedeMora</uri> (last access: 17 August 2020),
2019.</mixed-citation></ref>
      <ref id="bib1.bibx10"><label>de Mora et al.(2018)</label><?label demora2018?><mixed-citation>de Mora, L., Yool, A., Palmieri, J., Sellar, A., Kuhlbrodt, T., Popova, E., Jones, C., and Allen, J. I.: BGC-val: a model- and grid-independent Python toolkit to evaluate marine biogeochemical models, Geosci. Model Dev., 11, 4215–4240, <ext-link xlink:href="https://doi.org/10.5194/gmd-11-4215-2018" ext-link-type="DOI">10.5194/gmd-11-4215-2018</ext-link>, 2018.</mixed-citation></ref>
      <ref id="bib1.bibx11"><label>Eyring et al.(2016)</label><?label Eyring2016?><mixed-citation>Eyring, V., Bony, S., Meehl, G. A., Senior, C. A., Stevens, B., Stouffer,
R. J., and Taylor, K. E.: Overview of the Coupled Model Intercomparison
Project Phase 6 (CMIP6) experimental design and organization, Geosci.
Model Dev., 9, 1937–1958, <ext-link xlink:href="https://doi.org/10.5194/gmd-9-1937-2016" ext-link-type="DOI">10.5194/gmd-9-1937-2016</ext-link>, 2016.</mixed-citation></ref>
      <ref id="bib1.bibx12"><label>FFmpeg Developers(2017)</label><?label ffmpeg?><mixed-citation>FFmpeg Developers: FFmpeg, a complete, cross-platform solution to record,
convert and stream audio and video, available at:
<uri>https://ffmpeg.org/</uri> (last access: 17 August 2020),
2017.</mixed-citation></ref>
      <ref id="bib1.bibx13"><label>Flowers(2005)</label><?label Flowers2005?><mixed-citation>
Flowers, J. H.: Thirteen years of reflection on auditory graphing: promises,
pitfalls and potential new directions, Proceedings of ICAD 05-Eleventh Meeting of the International Conference on Auditory Display, Limerick, Ireland, 6–9 July,   406–409, 2005.</mixed-citation></ref>
      <ref id="bib1.bibx14"><label>Google(2019)</label><?label youtubestudio?><mixed-citation>Google: Manage your channel with Creator Studio – Youtube studio support
website, available at:
<uri>https://support.google.com/youtube/answer/9440613</uri> (last access: 17 August 2020),
2019.</mixed-citation></ref>
      <ref id="bib1.bibx15"><label>Izumo and Toivonen(2004)</label><?label timidity?><mixed-citation>Izumo, M. and Toivonen, T.: TiMidity<inline-formula><mml:math id="M35" display="inline"><mml:mrow><mml:mo>+</mml:mo><mml:mo>+</mml:mo></mml:mrow></mml:math></inline-formula> open source MIDI to WAVE converter and
player, available at: <uri>http://timidity.sourceforge.net/</uri> (last access: 17 August 2020),
2004.</mixed-citation></ref>
      <ref id="bib1.bibx16"><label>Jiang et al.(2014)</label><?label Jiang2014?><mixed-citation>Jiang, L., Miao, Y., Yang, Y., Lan, Z., and Hauptmann, A. G.: Viral Video
Style: A Closer Look at Viral Videos on YouTube, in: Proceedings of
International Conference on Multimedia Retrieval, 193–200,
<ext-link xlink:href="https://doi.org/10.1145/2578726.2578754" ext-link-type="DOI">10.1145/2578726.2578754</ext-link>, 2014.</mixed-citation></ref>
      <ref id="bib1.bibx17"><label>Jones et al.(2016)</label><?label Jones2016?><mixed-citation>Jones, C. D., Arora, V., Friedlingstein, P., Bopp, L., Brovkin, V., Dunne, J.,
Graven, H., Hoffman, F., Ilyina, T., John, J. G., Jung, M., Kawamiya, M.,
Koven, C., Pongratz, J., Raddatz, T., Randerson, J. T., and Zaehle, S.: C4MIP
– The Coupled Climate–Carbon Cycle Model Intercomparison Project:
experimental protocol for CMIP6, Geosci. Model Dev., 9,
2853–2880, <ext-link xlink:href="https://doi.org/10.5194/gmd-9-2853-2016" ext-link-type="DOI">10.5194/gmd-9-2853-2016</ext-link>, 2016.</mixed-citation></ref>
      <ref id="bib1.bibx18"><label>Kjellberg and PewDiePie(2017)</label><?label thumbnail0?><mixed-citation>Kjellberg, F. A. U. and PewDiePie: How to make really good thumbnails on
YouTube, available at:
<uri>https://www.youtube.com/watch?v=Nz3Ngt0AMDA</uri> (last access: 17 August 2020),
2017.</mixed-citation></ref>
      <ref id="bib1.bibx19"><label>Morris and Mohacsi(2005)</label><?label Morris2005?><mixed-citation>
Morris, R. W. and Mohacsi, P. J.: How Well Can Anaesthetists Discriminate
Pulse Oximeter Tones?, Anaesth Intensive Care, 33, 497–500, 2005.</mixed-citation></ref>
      <ref id="bib1.bibx20"><label>MuseScore BVBA(2019)</label><?label musescore?><mixed-citation>MuseScore BVBA: MuseScore Music Score Editor, available at:
<uri>https://musescore.com/</uri> (last access: 17 August 2020),
2019.</mixed-citation></ref>
      <ref id="bib1.bibx21"><label>Myers(2019)</label><?label thumbnail2?><mixed-citation>Myers, L.: This is How to Create the Best YouTube Thumbnails, available at:
<uri>https://louisem.com/198803/how-to-youtube-thumbnails</uri> (last access: 17 August 2020),
2019.</mixed-citation></ref>
      <ref id="bib1.bibx22"><label>Neely(2019)</label><?label Neely?><mixed-citation>Neely, A.: The 7 Levels of Jazz Harmony, available at:
<uri>https://www.youtube.com/watch?v=lz3WR-F_pnM</uri> (last access: 17 August 2020),
2019.</mixed-citation></ref>
      <ref id="bib1.bibx23"><label>Orr et al.(2005)</label><?label Orr2005?><mixed-citation>Orr, J. C., Fabry, V. J., Aumont, O., Bopp, L., Doney, S. C., Feely, R. A.,
Gnanadesikan, A., Gruber, N., Ishida, A., Joos, F., Key, R. M., Lindsay, K.,
Maier-reimer, E., Matear, R., Monfray, P., Mouchet, A., Najjar, R. G.,
Slater, R. D., Totterdell, I. J., Weirig, M.-f., Yamanaka, Y., and Yool, A.:
Anthropogenic ocean acidification over the twenty-first century and its
impact on calcifying organisms, Nature, 437, 681–686,
<ext-link xlink:href="https://doi.org/10.1038/nature04095" ext-link-type="DOI">10.1038/nature04095</ext-link>, 2005.</mixed-citation></ref>
      <ref id="bib1.bibx24"><label>Pollack and Ficks(1954)</label><?label Pollack1954?><mixed-citation>Pollack, I. and Ficks, L.: The Information of Elementary Multidimensional
Auditory Displays,  J. Acoust. Soc. Am., 26, p. 136,
<ext-link xlink:href="https://doi.org/10.1121/1.1917759" ext-link-type="DOI">10.1121/1.1917759</ext-link>, 1954.</mixed-citation></ref>
      <ref id="bib1.bibx25"><label>Revill(2018)</label><?label Revill2018?><mixed-citation>
Revill, G.: Landscape, Music and Sonic Environments, in: The Routledge
Companion to Landscape Studies, edited by: Howard, P., Thompson, I.,
Waterton, E., and Atha, M.,   chap. 21, London, 2nd Edn., p. 650, 2018.</mixed-citation></ref>
      <ref id="bib1.bibx26"><label>Ridley et al.(2018)</label><?label Ridley2018?><mixed-citation>Ridley, J. K., Blockley, E. W., Keen, A. B., Rae, J. G. L., West, A. E., and
Schroeder, D.: The sea ice model component of HadGEM3-GC3.1,
Geosci. Model Dev., 11, 713–723,
<ext-link xlink:href="https://doi.org/10.5194/gmd-11-713-2018" ext-link-type="DOI">10.5194/gmd-11-713-2018</ext-link>, 2018.</mixed-citation></ref>
      <ref id="bib1.bibx27"><label>Righi et al.(2019)</label><?label ESMValToolv2discussions2019?><mixed-citation>Righi, M., Andela, B., Eyring, V., Lauer, A., Predoi, V., Schlund, M.,
Vegas-regidor, J., Bock, L., Brötz, B., Mora, L. D., Diblen, F.,
Dreyer, L., Drost, N., Earnshaw, P., Hassler, B., Koldunov, N., Little, B.,
Loosveldt, S., and Zimmermann, K.: Earth System Model Evaluation Tool (ESMValTool) v2.0 – technical overview, Geosci. Model Dev., 13, 1179–1199, <ext-link xlink:href="https://doi.org/10.5194/gmd-13-1179-2020" ext-link-type="DOI">10.5194/gmd-13-1179-2020</ext-link>, 2020.</mixed-citation></ref>
      <ref id="bib1.bibx28"><label>Rutherford and Royds(1908)</label><?label Rutherford1908?><mixed-citation>Rutherford, E. and Royds, T.: Spectrum of the Radium Emanation, Phil. Mag. S.,
16, 313–319, <ext-link xlink:href="https://doi.org/10.1080/14786440808636511" ext-link-type="DOI">10.1080/14786440808636511</ext-link>, 1908.</mixed-citation></ref>
      <ref id="bib1.bibx29"><label>Sanderson et al.(2009)</label><?label Sanderson2009?><mixed-citation>Sanderson, P. M., Liu, D., and Jenkins, S. A.: Auditory displays in
anesthesiology, Curr. Opin. Anaesthesio., 22, 788–795, <ext-link xlink:href="https://doi.org/10.1097/ACO.0b013e3283326a2f" ext-link-type="DOI">10.1097/ACO.0b013e3283326a2f</ext-link>, 2009.</mixed-citation></ref>
      <ref id="bib1.bibx30"><label>Schroeder(2002)</label><?label Schroeder2002?><mixed-citation>
Schroeder, C.: Hal Leonard Pocket Music Theory: A Comprehensive and Convenient
Source for All Musicians, Hal Leonard,   2002.</mixed-citation></ref>
      <ref id="bib1.bib1"><label>1</label><?label 1?><mixed-citation>Schubart, C. F. D. and DuBois, T. A.:
Ideen zu Einer Ästhetik der Tonkunst:  An annotated translation
A Dissertation Presented to the Faculty of the graduate school university of Southern California in fulfillment of the requirements for the degree of Doctor of Philosophy for Musicology, available at<?pagebreak page278?>: <uri>https://www.musikipedia.dk/dokumenter/boeger/engelsk-tonkunst.pdf</uri>
(last access: 17 August 2020),  1983.</mixed-citation></ref>
      <ref id="bib1.bibx31"><label>Sellar et al.(2019)</label><?label Sellar2019?><mixed-citation>Sellar, A. A., Jones, C. G., Mulcahy, J. P., Tang, Y., Yool, A., Wiltshire, A.,
O'Connor, F. M., Stringer, M., Hill, R., Palmieri, J., Woodward, S., Mora,
L., Kuhlbrodt, T., Rumbold, S. T., Kelley, D. I., Ellis, R., Johnson, C. E.,
Walton, J., Abraham, N. L., Andrews, M. B., Andrews, T., Archibald, A. T.,
Berthou, S., Burke, E., Blockley, E., Carslaw, K., Dalvi, M., Edwards, J.,
Folberth, G. A., Gedney, N., Griffiths, P. T., Harper, A. B., Hendry, M. A.,
Hewitt, A. J., Johnson, B., Jones, A., Jones, C. D., Keeble, J., Liddicoat,
S., Morgenstern, O., Parker, R. J., Predoi, V., Robertson, E., Siahaan, A.,
Smith, R. S., Swaminathan, R., Woodhouse, M. T., Zeng, G., and Zerroukat, M.:  UKESM1: Description and Evaluation of the U.K. Earth System Model, J. Adv. Model. Earth Syst., 11, 4513–4558,
<ext-link xlink:href="https://doi.org/10.1029/2019MS001739" ext-link-type="DOI">10.1029/2019MS001739</ext-link>, 2019.</mixed-citation></ref>
      <ref id="bib1.bibx32"><label>Smith(2018)</label><?label BSmith2018?><mixed-citation>
Smith, B.: Poems for the Earth System Model, Magma poetry, Autumn, 72, 16–19,
2018.</mixed-citation></ref>
      <ref id="bib1.bibx33"><label>Storkey et al.(2018)</label><?label Storkey2018a?><mixed-citation>Storkey, D., Blaker, A. T., Mathiot, P., Megann, A., Aksenov, Y., Blockley,
E. W., Calvert, D., Graham, T., Hewitt, H. T., Hyder, P., Kuhlbrodt, T., Rae,
J. G., and Sinha, B.: UK Global Ocean GO6 and GO7: A traceable hierarchy of
model resolutions, Geosci.  Model Dev., 11, 3187–3213,
<ext-link xlink:href="https://doi.org/10.5194/gmd-11-3187-2018" ext-link-type="DOI">10.5194/gmd-11-3187-2018</ext-link>, 2018.</mixed-citation></ref>
      <ref id="bib1.bibx34"><label>The MIDI Manufacturers Association(1996)</label><?label MIDI1996?><mixed-citation>
The MIDI Manufacturers Association: The Complete MIDI 1.0 Detailed
Specification, The MIDI Manufacturers Association, Los Angeles, CA, 3rd Edn., 1996.</mixed-citation></ref>
      <ref id="bib1.bibx35"><label>Think Media(2017)</label><?label whentopost2?><mixed-citation>Think Media: How Often Should You Post on YouTube? – 3 YouTube Upload
Schedule Tips, available at:
<uri>https://www.youtube.com/watch?v=A3kwRAB_-lQ</uri> (last access: 17 August 2020),
2017.</mixed-citation></ref>
      <ref id="bib1.bibx36"><label>Tsuchiya et al.(2015)</label><?label Tsuchiya2015?><mixed-citation>Tsuchiya, T., Freeman, J., and Lerner, L. W.: Data-to-music API : Real-time
data-agnostic sonification with musical structure models, The 21st
International Conference on Auditory Display,  244–251, 2015.
 </mixed-citation></ref><?xmltex \hack{\newpage}?>
      <ref id="bib1.bibx37"><label>Video Influencers(2016)</label><?label thumbnail1?><mixed-citation>Video Influencers: How to Make a YouTube Custom Thumbnail Tutorial – Quick
and Easy, available at:
<uri>https://www.youtube.com/watch?v=8YbZuaBP9B8</uri> (last access: 17 August 2020),
2016.</mixed-citation></ref>
      <ref id="bib1.bibx38"><label>Walden(2017)</label><?label humanise?><mixed-citation>Walden, J.: Cubase: Humanise Your Programmed Drums, available at:
<uri>https://www.soundonsound.com/techniques/cubase-humanise-your-programmed-drums</uri> (last access: 17 August 2020),
2017.</mixed-citation></ref>
      <ref id="bib1.bibx39"><label>Walker and Nees(2011)</label><?label Walker2011?><mixed-citation>
Walker, B. N. and Nees, M. A.: The Theory of Sonification, in: The
Sonification Handbook, edited by:  Hermann, T., Hunt, A., and Neuhoff, J. G.,
chap. 2,  Logos Publishing House, Berlin, Germany, 9–39, 2011.</mixed-citation></ref>
      <ref id="bib1.bib2"><label>2</label><?label 1?><mixed-citation>World Climate Change Research Program (WCRP): Coupled Model Intercomparison Project (Phase 6) Data Search interface, available at: <uri>https://esgf-node.llnl.gov/projects/cmip6/</uri>, last access: 17 August 2020.</mixed-citation></ref>
      <ref id="bib1.bibx40"><label>West(2011)</label><?label West2011?><mixed-citation>
West, T.: Going Viral : Factors That Lead Videos to Become Internet
Phenomena, The Elon Journal of Undergraduate Research in Communications, 2,
76–84, 2011.</mixed-citation></ref>
      <ref id="bib1.bibx41"><label>Yool et al.(2013)</label><?label Yool2013a?><mixed-citation>Yool, A., Popova, E. E., and Anderson, T. R.: MEDUSA-2.0: An intermediate
complexity biogeochemical model of the marine carbon cycle for climate change
and ocean acidification studies, Geosci. Model Dev., 6,
1767–1811, <ext-link xlink:href="https://doi.org/10.5194/gmd-6-1767-2013" ext-link-type="DOI">10.5194/gmd-6-1767-2013</ext-link>, 2013.</mixed-citation></ref>
      <ref id="bib1.bibx42"><label>Yool et al.(2020)</label><?label Yool2020?><mixed-citation>Yool, A., Palmiéri, J., Jones, C. G., Sellar, A. A., de Mora, L., Kuhlbrodt,
T., Popova, E. E., Mulcahy, J. P., Wiltshire, A., Rumbold, S. T., Stringer,
M., Hill, R. S. R., Tang, Y., Walton, J., Blaker, A., Nurser, A. J. G.,
Coward, A. C., Hirschi, J., Woodward, S., Kelley, D. I., Ellis, R., and
Rumbold-Jones, S.: Spin-up of UK Earth System Model 1 (UKESM1) for CMIP6,
J. Adv. Model. Earth Syst., 12, e2019MS001933,
<ext-link xlink:href="https://doi.org/10.1029/2019MS001933" ext-link-type="DOI">10.1029/2019MS001933</ext-link>, 2020.</mixed-citation></ref>

  </ref-list></back>
    <!--<article-title-html>Earth system music: music generated from the United Kingdom Earth System Model (UKESM1)</article-title-html>
<abstract-html><p>Scientific data are almost always represented graphically in figures or in videos.
With the ever-growing interest from the general public in understanding
climate sciences, it is becoming increasingly important that scientists present this
information in ways that are both accessible and engaging to non-experts.</p><p>In this pilot study, we use time series data from the first United Kingdom Earth System Model (UKESM1) to
create six procedurally generated musical pieces.
Each of these pieces presents a unique aspect of the ocean component of the UKESM1,
either in terms of a scientific principle or a practical aspect of modelling.
In addition, each piece is arranged using a different musical progression, style and tempo.</p><p>These pieces were created in the Musical Instrument Digital Interface (MIDI) format
and then performed by a digital piano synthesiser.
An associated video showing the time development of the data in time with the music was also created.
The music and video were published on the lead author's YouTube channel.
A brief description of the methodology was also posted alongside the video.
We also discuss the limitations of this pilot study and describe
several approaches to extend and expand upon this work.</p></abstract-html>
<ref-html id="bib1.bib1"><label>Borromeo et al.(2016)</label><mixed-citation>
Borromeo, L., Round, K., and Perera, J.: Climate Symphony, available at:
<a href="https://www.disobedientfilms.com/climate-symphony" target="_blank"/> (last access: 17 August 2020),
2016.
</mixed-citation></ref-html>
<ref-html id="bib1.bib2"><label>Caldeira and Wickett(2003)</label><mixed-citation>
Caldeira, K. and Wickett, M. E.: Anthropogenic carbon and ocean pH, Nature,
425, 365–365, <a href="https://doi.org/10.1038/425365a" target="_blank">https://doi.org/10.1038/425365a</a>, 2003.
</mixed-citation></ref-html>
<ref-html id="bib1.bib3"><label>Clendinning and Marvin(2016)</label><mixed-citation>
Clendinning, J. P. and Marvin, E. W.: The Musician's Guide To Theory And
Analysis, W. W. Norton &amp; Company, 3rd Edn.,
2016.
</mixed-citation></ref-html>
<ref-html id="bib1.bib4"><label>Coltrane(1960)</label><mixed-citation>
Coltrane, J.: Giant Steps (album), Atlantic Records, Published February 1960.
</mixed-citation></ref-html>
<ref-html id="bib1.bib5"><label>Corey(2016)</label><mixed-citation>
Corey, M.: MIDITime python library for MIDI, available at:
<a href="https://github.com/cirlabs/miditime" target="_blank"/> (last access: 17 August 2020),
2016.
</mixed-citation></ref-html>
<ref-html id="bib1.bib6"><label>Cox(2017)</label><mixed-citation>
Cox, S.: How Often Should You Upload Videos to YouTube to Get More Views, available at:
<a href="https://www.filmora.io/community-blog/how-often-should-you-upload-to-youtube-consistent-posting-187.html" target="_blank">https://www.filmora.io/community-blog/how-often-should-you-upload-to-youtube–consistent-posting-187.html</a> (last access: 17 August 2020),
2017.
</mixed-citation></ref-html>
<ref-html id="bib1.bib7"><label>Craven and Mcindoe(1999)</label><mixed-citation>
Craven, R. M. and Mcindoe, A. K.: Continuous auditory monitoring – how much
information do we register?, Brit. J. Anaesth., 83, 747–749,
1999.
</mixed-citation></ref-html>
<ref-html id="bib1.bib8"><label>Crawford(2013)</label><mixed-citation>
Crawford, D.: Planetary Bands, Warming World string quartet, Video published by Ensia magazine, available at: <a href="https://vimeo.com/127083533" target="_blank"/>  (last access: 17 August 2020),
2013.
</mixed-citation></ref-html>
<ref-html id="bib1.bib9"><label>de Mora(2019)</label><mixed-citation>
de Mora, L.: Lee de Mora's YouTube channel homepage, available at:
<a href="https://www.youtube.com/c/LeedeMora" target="_blank"/> (last access: 17 August 2020),
2019.
</mixed-citation></ref-html>
<ref-html id="bib1.bib10"><label>de Mora et al.(2018)</label><mixed-citation>
de Mora, L., Yool, A., Palmieri, J., Sellar, A., Kuhlbrodt, T., Popova, E., Jones, C., and Allen, J. I.: BGC-val: a model- and grid-independent Python toolkit to evaluate marine biogeochemical models, Geosci. Model Dev., 11, 4215–4240, <a href="https://doi.org/10.5194/gmd-11-4215-2018" target="_blank">https://doi.org/10.5194/gmd-11-4215-2018</a>, 2018.
</mixed-citation></ref-html>
<ref-html id="bib1.bib11"><label>Eyring et al.(2016)</label><mixed-citation>
Eyring, V., Bony, S., Meehl, G. A., Senior, C. A., Stevens, B., Stouffer,
R. J., and Taylor, K. E.: Overview of the Coupled Model Intercomparison
Project Phase 6 (CMIP6) experimental design and organization, Geosci.
Model Dev., 9, 1937–1958, <a href="https://doi.org/10.5194/gmd-9-1937-2016" target="_blank">https://doi.org/10.5194/gmd-9-1937-2016</a>, 2016.
</mixed-citation></ref-html>
<ref-html id="bib1.bib12"><label>FFmpeg Developers(2017)</label><mixed-citation>
FFmpeg Developers: FFmpeg, a complete, cross-platform solution to record,
convert and stream audio and video, available at:
<a href="https://ffmpeg.org/" target="_blank"/> (last access: 17 August 2020),
2017.
</mixed-citation></ref-html>
<ref-html id="bib1.bib13"><label>Flowers(2005)</label><mixed-citation>
Flowers, J. H.: Thirteen years of reflection on auditory graphing: promises,
pitfalls and potential new directions, Proceedings of ICAD 05-Eleventh Meeting of the International Conference on Auditory Display, Limerick, Ireland, 6–9 July,   406–409, 2005.
</mixed-citation></ref-html>
<ref-html id="bib1.bib14"><label>Google(2019)</label><mixed-citation>
Google: Manage your channel with Creator Studio – Youtube studio support
website, available at:
<a href="https://support.google.com/youtube/answer/9440613" target="_blank"/> (last access: 17 August 2020),
2019.
</mixed-citation></ref-html>
<ref-html id="bib1.bib15"><label>Izumo and Toivonen(2004)</label><mixed-citation>
Izumo, M. and Toivonen, T.: TiMidity++ open source MIDI to WAVE converter and
player, available at: <a href="http://timidity.sourceforge.net/" target="_blank"/> (last access: 17 August 2020),
2004.
</mixed-citation></ref-html>
<ref-html id="bib1.bib16"><label>Jiang et al.(2014)</label><mixed-citation>
Jiang, L., Miao, Y., Yang, Y., Lan, Z., and Hauptmann, A. G.: Viral Video
Style: A Closer Look at Viral Videos on YouTube, in: Proceedings of
International Conference on Multimedia Retrieval, 193–200,
<a href="https://doi.org/10.1145/2578726.2578754" target="_blank">https://doi.org/10.1145/2578726.2578754</a>, 2014.
</mixed-citation></ref-html>
<ref-html id="bib1.bib17"><label>Jones et al.(2016)</label><mixed-citation>
Jones, C. D., Arora, V., Friedlingstein, P., Bopp, L., Brovkin, V., Dunne, J.,
Graven, H., Hoffman, F., Ilyina, T., John, J. G., Jung, M., Kawamiya, M.,
Koven, C., Pongratz, J., Raddatz, T., Randerson, J. T., and Zaehle, S.: C4MIP
– The Coupled Climate–Carbon Cycle Model Intercomparison Project:
experimental protocol for CMIP6, Geosci. Model Dev., 9,
2853–2880, <a href="https://doi.org/10.5194/gmd-9-2853-2016" target="_blank">https://doi.org/10.5194/gmd-9-2853-2016</a>, 2016.
</mixed-citation></ref-html>
<ref-html id="bib1.bib18"><label>Kjellberg and PewDiePie(2017)</label><mixed-citation>
Kjellberg, F. A. U. and PewDiePie: How to make really good thumbnails on
YouTube, available at:
<a href="https://www.youtube.com/watch?v=Nz3Ngt0AMDA" target="_blank"/> (last access: 17 August 2020),
2017.
</mixed-citation></ref-html>
<ref-html id="bib1.bib19"><label>Morris and Mohacsi(2005)</label><mixed-citation>
Morris, R. W. and Mohacsi, P. J.: How Well Can Anaesthetists Discriminate
Pulse Oximeter Tones?, Anaesth Intensive Care, 33, 497–500, 2005.
</mixed-citation></ref-html>
<ref-html id="bib1.bib20"><label>MuseScore BVBA(2019)</label><mixed-citation>
MuseScore BVBA: MuseScore Music Score Editor, available at:
<a href="https://musescore.com/" target="_blank"/> (last access: 17 August 2020),
2019.
</mixed-citation></ref-html>
<ref-html id="bib1.bib21"><label>Myers(2019)</label><mixed-citation>
Myers, L.: This is How to Create the Best YouTube Thumbnails, available at:
<a href="https://louisem.com/198803/how-to-youtube-thumbnails" target="_blank"/> (last access: 17 August 2020),
2019.
</mixed-citation></ref-html>
<ref-html id="bib1.bib22"><label>Neely(2019)</label><mixed-citation>
Neely, A.: The 7 Levels of Jazz Harmony, available at:
<a href="https://www.youtube.com/watch?v=lz3WR-F_pnM" target="_blank"/> (last access: 17 August 2020),
2019.
</mixed-citation></ref-html>
<ref-html id="bib1.bib23"><label>Orr et al.(2005)</label><mixed-citation>
Orr, J. C., Fabry, V. J., Aumont, O., Bopp, L., Doney, S. C., Feely, R. A.,
Gnanadesikan, A., Gruber, N., Ishida, A., Joos, F., Key, R. M., Lindsay, K.,
Maier-reimer, E., Matear, R., Monfray, P., Mouchet, A., Najjar, R. G.,
Slater, R. D., Totterdell, I. J., Weirig, M.-f., Yamanaka, Y., and Yool, A.:
Anthropogenic ocean acidification over the twenty-first century and its
impact on calcifying organisms, Nature, 437, 681–686,
<a href="https://doi.org/10.1038/nature04095" target="_blank">https://doi.org/10.1038/nature04095</a>, 2005.
</mixed-citation></ref-html>
<ref-html id="bib1.bib24"><label>Pollack and Ficks(1954)</label><mixed-citation>
Pollack, I. and Ficks, L.: The Information of Elementary Multidimensional
Auditory Displays,  J. Acoust. Soc. Am., 26, p. 136,
<a href="https://doi.org/10.1121/1.1917759" target="_blank">https://doi.org/10.1121/1.1917759</a>, 1954.
</mixed-citation></ref-html>
<ref-html id="bib1.bib25"><label>Revill(2018)</label><mixed-citation>
Revill, G.: Landscape, Music and Sonic Environments, in: The Routledge
Companion to Landscape Studies, edited by: Howard, P., Thompson, I.,
Waterton, E., and Atha, M.,   chap. 21, London, 2nd Edn., p. 650, 2018.
</mixed-citation></ref-html>
<ref-html id="bib1.bib26"><label>Ridley et al.(2018)</label><mixed-citation>
Ridley, J. K., Blockley, E. W., Keen, A. B., Rae, J. G. L., West, A. E., and
Schroeder, D.: The sea ice model component of HadGEM3-GC3.1,
Geosci. Model Dev., 11, 713–723,
<a href="https://doi.org/10.5194/gmd-11-713-2018" target="_blank">https://doi.org/10.5194/gmd-11-713-2018</a>, 2018.
</mixed-citation></ref-html>
<ref-html id="bib1.bib27"><label>Righi et al.(2019)</label><mixed-citation>
Righi, M., Andela, B., Eyring, V., Lauer, A., Predoi, V., Schlund, M.,
Vegas-regidor, J., Bock, L., Brötz, B., Mora, L. D., Diblen, F.,
Dreyer, L., Drost, N., Earnshaw, P., Hassler, B., Koldunov, N., Little, B.,
Loosveldt, S., and Zimmermann, K.: Earth System Model Evaluation Tool (ESMValTool) v2.0 – technical overview, Geosci. Model Dev., 13, 1179–1199, <a href="https://doi.org/10.5194/gmd-13-1179-2020" target="_blank">https://doi.org/10.5194/gmd-13-1179-2020</a>, 2020.
</mixed-citation></ref-html>
<ref-html id="bib1.bib28"><label>Rutherford and Royds(1908)</label><mixed-citation>
Rutherford, E. and Royds, T.: Spectrum of the Radium Emanation, Phil. Mag. S.,
16, 313–319, <a href="https://doi.org/10.1080/14786440808636511" target="_blank">https://doi.org/10.1080/14786440808636511</a>, 1908.
</mixed-citation></ref-html>
<ref-html id="bib1.bib29"><label>Sanderson et al.(2009)</label><mixed-citation>
Sanderson, P. M., Liu, D., and Jenkins, S. A.: Auditory displays in
anesthesiology, Curr. Opin. Anaesthesio., 22, 788–795, <a href="https://doi.org/10.1097/ACO.0b013e3283326a2f" target="_blank">https://doi.org/10.1097/ACO.0b013e3283326a2f</a>, 2009.
</mixed-citation></ref-html>
<ref-html id="bib1.bib30"><label>Schroeder(2002)</label><mixed-citation>
Schroeder, C.: Hal Leonard Pocket Music Theory: A Comprehensive and Convenient
Source for All Musicians, Hal Leonard,   2002.
</mixed-citation></ref-html>
<ref-html id="bib1.bib31"><label>1</label><mixed-citation>
Schubart, C. F. D. and DuBois, T. A.:
Ideen zu Einer Ästhetik der Tonkunst:  An annotated translation
A Dissertation Presented to the Faculty of the graduate school university of Southern California in fulfillment of the requirements for the degree of Doctor of Philosophy for Musicology, available at: <a href="https://www.musikipedia.dk/dokumenter/boeger/engelsk-tonkunst.pdf" target="_blank"/>
(last access: 17 August 2020),  1983.
</mixed-citation></ref-html>
<ref-html id="bib1.bib32"><label>Sellar et al.(2019)</label><mixed-citation>
Sellar, A. A., Jones, C. G., Mulcahy, J. P., Tang, Y., Yool, A., Wiltshire, A.,
O'Connor, F. M., Stringer, M., Hill, R., Palmieri, J., Woodward, S., Mora,
L., Kuhlbrodt, T., Rumbold, S. T., Kelley, D. I., Ellis, R., Johnson, C. E.,
Walton, J., Abraham, N. L., Andrews, M. B., Andrews, T., Archibald, A. T.,
Berthou, S., Burke, E., Blockley, E., Carslaw, K., Dalvi, M., Edwards, J.,
Folberth, G. A., Gedney, N., Griffiths, P. T., Harper, A. B., Hendry, M. A.,
Hewitt, A. J., Johnson, B., Jones, A., Jones, C. D., Keeble, J., Liddicoat,
S., Morgenstern, O., Parker, R. J., Predoi, V., Robertson, E., Siahaan, A.,
Smith, R. S., Swaminathan, R., Woodhouse, M. T., Zeng, G., and Zerroukat, M.:  UKESM1: Description and Evaluation of the U.K. Earth System Model, J. Adv. Model. Earth Syst., 11, 4513–4558,
<a href="https://doi.org/10.1029/2019MS001739" target="_blank">https://doi.org/10.1029/2019MS001739</a>, 2019.
</mixed-citation></ref-html>
<ref-html id="bib1.bib33"><label>Smith(2018)</label><mixed-citation>
Smith, B.: Poems for the Earth System Model, Magma poetry, Autumn, 72, 16–19,
2018.
</mixed-citation></ref-html>
<ref-html id="bib1.bib34"><label>Storkey et al.(2018)</label><mixed-citation>
Storkey, D., Blaker, A. T., Mathiot, P., Megann, A., Aksenov, Y., Blockley,
E. W., Calvert, D., Graham, T., Hewitt, H. T., Hyder, P., Kuhlbrodt, T., Rae,
J. G., and Sinha, B.: UK Global Ocean GO6 and GO7: A traceable hierarchy of
model resolutions, Geosci.  Model Dev., 11, 3187–3213,
<a href="https://doi.org/10.5194/gmd-11-3187-2018" target="_blank">https://doi.org/10.5194/gmd-11-3187-2018</a>, 2018.
</mixed-citation></ref-html>
<ref-html id="bib1.bib35"><label>The MIDI Manufacturers Association(1996)</label><mixed-citation>
The MIDI Manufacturers Association: The Complete MIDI 1.0 Detailed
Specification, The MIDI Manufacturers Association, Los Angeles, CA, 3rd Edn., 1996.
</mixed-citation></ref-html>
<ref-html id="bib1.bib36"><label>Think Media(2017)</label><mixed-citation>
Think Media: How Often Should You Post on YouTube? – 3 YouTube Upload
Schedule Tips, available at:
<a href="https://www.youtube.com/watch?v=A3kwRAB_-lQ" target="_blank"/> (last access: 17 August 2020),
2017.
</mixed-citation></ref-html>
<ref-html id="bib1.bib37"><label>Tsuchiya et al.(2015)</label><mixed-citation>
Tsuchiya, T., Freeman, J., and Lerner, L. W.: Data-to-music API : Real-time
data-agnostic sonification with musical structure models, The 21st
International Conference on Auditory Display,  244–251, 2015.

</mixed-citation></ref-html>
<ref-html id="bib1.bib38"><label>Video Influencers(2016)</label><mixed-citation>
Video Influencers: How to Make a YouTube Custom Thumbnail Tutorial – Quick
and Easy, available at:
<a href="https://www.youtube.com/watch?v=8YbZuaBP9B8" target="_blank"/> (last access: 17 August 2020),
2016.
</mixed-citation></ref-html>
<ref-html id="bib1.bib39"><label>Walden(2017)</label><mixed-citation>
Walden, J.: Cubase: Humanise Your Programmed Drums, available at:
<a href="https://www.soundonsound.com/techniques/cubase-humanise-your-programmed-drums" target="_blank"/> (last access: 17 August 2020),
2017.
</mixed-citation></ref-html>
<ref-html id="bib1.bib40"><label>Walker and Nees(2011)</label><mixed-citation>
Walker, B. N. and Nees, M. A.: The Theory of Sonification, in: The
Sonification Handbook, edited by:  Hermann, T., Hunt, A., and Neuhoff, J. G.,
chap. 2,  Logos Publishing House, Berlin, Germany, 9–39, 2011.
</mixed-citation></ref-html>
<ref-html id="bib1.bib41"><label>2</label><mixed-citation>
World Climate Change Research Program (WCRP): Coupled Model Intercomparison Project (Phase 6) Data Search interface, available at: <a href="https://esgf-node.llnl.gov/projects/cmip6/" target="_blank"/>, last access: 17 August 2020.
</mixed-citation></ref-html>
<ref-html id="bib1.bib42"><label>West(2011)</label><mixed-citation>
West, T.: Going Viral : Factors That Lead Videos to Become Internet
Phenomena, The Elon Journal of Undergraduate Research in Communications, 2,
76–84, 2011.
</mixed-citation></ref-html>
<ref-html id="bib1.bib43"><label>Yool et al.(2013)</label><mixed-citation>
Yool, A., Popova, E. E., and Anderson, T. R.: MEDUSA-2.0: An intermediate
complexity biogeochemical model of the marine carbon cycle for climate change
and ocean acidification studies, Geosci. Model Dev., 6,
1767–1811, <a href="https://doi.org/10.5194/gmd-6-1767-2013" target="_blank">https://doi.org/10.5194/gmd-6-1767-2013</a>, 2013.
</mixed-citation></ref-html>
<ref-html id="bib1.bib44"><label>Yool et al.(2020)</label><mixed-citation>
Yool, A., Palmiéri, J., Jones, C. G., Sellar, A. A., de Mora, L., Kuhlbrodt,
T., Popova, E. E., Mulcahy, J. P., Wiltshire, A., Rumbold, S. T., Stringer,
M., Hill, R. S. R., Tang, Y., Walton, J., Blaker, A., Nurser, A. J. G.,
Coward, A. C., Hirschi, J., Woodward, S., Kelley, D. I., Ellis, R., and
Rumbold-Jones, S.: Spin-up of UK Earth System Model 1 (UKESM1) for CMIP6,
J. Adv. Model. Earth Syst., 12, e2019MS001933,
<a href="https://doi.org/10.1029/2019MS001933" target="_blank">https://doi.org/10.1029/2019MS001933</a>, 2020.
</mixed-citation></ref-html>--></article>
