- //LinearInterpolation.c
- //gcc MKAiff.c LinearInterpolation.c -o LinearInterpolation
- #include "MKAiff.h"
- #include <math.h>
- #define TRANSPOSITION -21
- #define NUM_CHANNELS aiffNumChannels(prerecordedAudio)
- #define SAMPLE_RATE aiffSampleRate(prerecordedAudio)
- #define BITS_PER_SAMPLE aiffBitsPerSample(prerecordedAudio)
- #define PRERECORDED_AUDIO_PATH "demo.aif"
- #define BUFFER_NUM_FRAMES 4096
- #define BUFFER_NUM_SAMPLES (BUFFER_NUM_FRAMES * NUM_CHANNELS)
- void linearInterpolateBuffer(float* previousFrame, int numChannels, float* input, int inNumFrames, float* output, int outFrames);
- void cubicInterpolateBuffer(float* previous3Frames, int numChannels, float* input, int inNumFrames, float* output, int outNumFrames);
- int main()
- {
- MKAiff* prerecordedAudio = aiffWithContentsOfFile(PRERECORDED_AUDIO_PATH);
- if(prerecordedAudio == NULL) return 1;
- const float PLAYBACK_SPEED = pow(pow(2, -TRANSPOSITION), (1/12.0));
- const float NUM_SECONDS = (PLAYBACK_SPEED * aiffDurationInSeconds(prerecordedAudio));
- int i;
- MKAiff* aiff = aiffWithDurationInSeconds(NUM_CHANNELS, SAMPLE_RATE, BITS_PER_SAMPLE, NUM_SECONDS + 1);
- if(aiff == NULL) return 1;
- float audioBuffer[BUFFER_NUM_SAMPLES];
- float interpolatedAudioBuffer[(int)(BUFFER_NUM_SAMPLES * PLAYBACK_SPEED)];
- int numSamplesRead, numSamplesToWrite;
- //Linear Interpolation needs 1 previous frame
- float previousFrame[NUM_CHANNELS * 3];
- for(i=0; i<NUM_CHANNELS * 3; previousFrame[i++] = 0);
- /* //Cubic Interpolation needs 3 previous frames
- float previousFrame[NUM_CHANNELS * 3];
- for(i=0; i<NUM_CHANNELS * 3; previousFrame[i++] = 0);
- */
- do{
- numSamplesRead = aiffReadFloatingPointSamplesAtPlayhead(prerecordedAudio, audioBuffer, BUFFER_NUM_SAMPLES);
- numSamplesToWrite = numSamplesRead * PLAYBACK_SPEED;
- numSamplesToWrite -= (numSamplesToWrite % NUM_CHANNELS);
- linearInterpolateBuffer(previousFrame, NUM_CHANNELS, audioBuffer, numSamplesRead / NUM_CHANNELS, interpolatedAudioBuffer, numSamplesToWrite / NUM_CHANNELS);
- //cubicInterpolateBuffer (previousFrame, NUM_CHANNELS, audioBuffer, numSamplesRead / NUM_CHANNELS, interpolatedAudioBuffer, numSamplesToWrite / NUM_CHANNELS);
- aiffAppendFloatingPointSamples(aiff, interpolatedAudioBuffer, numSamplesToWrite, aiffFloatSampleType);
- }while(numSamplesRead == BUFFER_NUM_SAMPLES);
- aiffSaveWithFilename(aiff, "LinearInterpolation.aif");
- aiffDestroy(aiff);
- return 0;
- }
/* Linearly resample `input` (inNumFrames interleaved frames of `numChannels`
 * channels) into `output` (outNumFrames frames).  Output frame i is
 * interpolated between input frames floor(d)-1 and floor(d), where
 * d = i * inNumFrames/outNumFrames, so the function never reads past the end
 * of `input` and can be called repeatedly on consecutive chunks of a stream.
 * `previousFrame` (numChannels samples) supplies the frame preceding input[0]
 * and is refreshed to the last frame of `input` before returning.
 */
void linearInterpolateBuffer(float* previousFrame, int numChannels, float* input, int inNumFrames, float* output, int outNumFrames)
{
  int i, j, index;
  double distance, prevValue, nextValue;

  /* fix: an empty chunk (possible on the final read of a stream) previously
     made the history-refresh loop below read input[-numChannels + j],
     an out-of-bounds access. */
  if(inNumFrames <= 0 || numChannels <= 0) return;

  for(i=0; i<outNumFrames; i++)
    {
      for(j=0; j<numChannels; j++)
        {
          distance = i * (inNumFrames / (double)outNumFrames);
          index = ((int)distance) * numChannels + j;
          nextValue = input[index];
          /* Before the first input frame, the "previous" sample comes from
             the saved history rather than from input[]. */
          prevValue = distance < 1 ? previousFrame[j] : input[index-numChannels];
          distance -= (int)distance;  /* keep only the fractional part */
          output[i*numChannels+j] = (nextValue-prevValue) * distance + prevValue;
        }
    }

  /* Save the last input frame as history for the next chunk. */
  for(j=0; j<numChannels; j++)
    previousFrame[j] = input[(inNumFrames - 1) * numChannels + j];
}
/* Cubic resampling of `input` (inNumFrames interleaved frames of
 * `numChannels` channels) into `output` (outNumFrames frames).  Each output
 * sample is a power-basis cubic through four consecutive input taps, so the
 * function never reads past the end of `input` and can be called repeatedly
 * on consecutive chunks of a stream.  `previous3Frames` (3*numChannels
 * samples, laid out [frame -3 | frame -2 | frame -1] relative to input[0])
 * supplies the taps before the chunk starts and is refreshed on return.
 */
void cubicInterpolateBuffer(float* previous3Frames, int numChannels, float* input, int inNumFrames, float* output, int outNumFrames)
{
  int i, j, index;
  double distance, prev, prevPrev, next, nextNext;
  double a, b, c, d;

  /* fix: an empty chunk previously caused out-of-bounds reads both in the
     tap selection and in the history refresh below. */
  if(inNumFrames <= 0 || numChannels <= 0) return;

  for(i=0; i<outNumFrames; i++)
    {
      for(j=0; j<numChannels; j++)
        {
          distance = i * (inNumFrames / (double)outNumFrames);
          index = ((int)distance) * numChannels + j;
          /* Four taps around the interpolation point; for the first three
             input frames the older taps come out of previous3Frames. */
          nextNext = input[index];
          next     = distance < 1 ? previous3Frames[index + 2*numChannels] : input[index -   numChannels];
          prev     = distance < 2 ? previous3Frames[index +   numChannels] : input[index - 2*numChannels];
          prevPrev = distance < 3 ? previous3Frames[index                ] : input[index - 3*numChannels];
          distance -= (int)distance;  /* keep only the fractional part */
          /* Power-basis cubic coefficients through the four taps. */
          a = nextNext - next - prevPrev + prev;
          b = prevPrev - prev - a;
          c = next - prevPrev;
          d = prev;
          output[i*numChannels+j] = ((a * distance * distance * distance) + (b * distance*distance) + (c * distance) + (d));
        }
    }

  if(inNumFrames >= 3)
    {
      /* Normal case: the last three input frames become the new history. */
      for(j=0; j<numChannels*3; j++)
        previous3Frames[j] = input[(inNumFrames - 3) * numChannels + j];
    }
  else
    {
      /* fix: chunks shorter than 3 frames previously read before input[0];
         instead shift the existing history left and append the new frames. */
      int shift = inNumFrames * numChannels;
      for(j=0; j<numChannels*3 - shift; j++)
        previous3Frames[j] = previous3Frames[j + shift];
      for(j=0; j<shift; j++)
        previous3Frames[numChannels*3 - shift + j] = input[j];
    }
}
Output:
Explanation of the Concepts
This example shows how to use the interpolation algorithms in the previous sections of this chapter. It reads in the audio file "demo.aif", and transposes it down 21 half-steps by interpolating it.
Download Demo.aif. The most memory-efficient way to interpolate a longer file is to read a smaller number (say a few thousand) of samples out of the original file, interpolate them, add them to a new file, and then get the next few thousand samples, and so forth, until the end of the file. The algorithms in the previous example were designed to facilitate this technique, so this example demonstrates the proper use of those functions.
From a musical perspective, we are not usually concerned with the duration of a sound after it is interpolated, so much as we are concerned with the pitch of that sound. Since interpolation deals in durations (number of samples), we will need to convert. The duration of a sound is inversely proportional to its pitch (i.e. raising the pitch reduces the duration), so when we transpose, if we know the ratio of the original pitch to the target pitch, then we also know the ratio of their durations. In the tempered chromatic scale, the ratio of one note to the next is equal to the twelfth root of two. An interval of, say, 5 half-steps is created by raising 2 to the 5th power (the number of half-steps), and taking the twelfth root of that. Furthermore (more high-school math), we remember that taking the 12th root of two is the same as raising two to the power of 1/12. So, if we want to transpose a sound by a certain number of half-steps, we can find the ratio of their frequencies like so:
frequencyRatio = (2^numHalfsteps)^(1/12)
The ratio of the duration of the original audio file to the interpolated audio file would just be one over this number. The mathematical shortcut for this, however, is just to raise 2 to the negative number of half-steps, so that the duration ratio can be found like so:
durationRatio = (2^-numHalfsteps)^(1/12)
Then, of course, multiplying the number of samples in the original audio buffer by the duration-ratio gives the number of samples that will be required in the interpolated buffer to transpose the sound by the given amount.