iOS: directly play raw audio from a UDP stream (NSData)

I write data to the server, and it is sent immediately to the client. The client receives UDP packets like this:

- (void)udpSocket:(GCDAsyncUdpSocket *)sock didReceiveData:(NSData *)data fromAddress:(NSData *)address withFilterContext:(id)filterContext
{    
if (!isRunning) return;
if (data)
{        
}
else
{      
}
}

      

Now the raw audio data is in the data variable, and I want to play it immediately. I have been stuck on this issue for about two days. I just want something lightweight like AudioTrack in Java. I have read a lot about audio queues etc., but I still don't get it. Can you give me a hint — in code form, please? I think I have checked every site and looked at every example, but I don't understand them. The callback functions are triggered after the buffer is full (in many examples), but I don't see how I can fill them with my NSData.

+3


source to share


2 answers


I am interested to hear the answers to this question. My solution was to create a custom audio server on iOS using OpenAL, which handles audio buffers out of the box. Basically, one thread consumes the audio stream sent from the server, while another thread runs your own OpenAL playback loop, which I set out here:



#import <OpenAL/al.h>
#import <OpenAL/alc.h>
#import <AudioToolbox/ExtendedAudioFile.h>


// Initializes the OpenAL playback pipeline: opens the default output device,
// creates and activates a context, allocates the pool of streaming buffers and
// the single streaming source used by inner_run, then primes the pipeline with
// the first LPCM chunk. On any failure the method logs and returns early,
// leaving the object unusable for playback.
-(void) init_openal {

    openal_device = alcOpenDevice(NULL);    // NULL selects the default output device

    if (openal_device == NULL) {
        NSLog(@"STR_OPENAL ERROR - failed to get audio device");
        return;
    }

    // create context
    openal_context = alcCreateContext(openal_device, 0);

    if (openal_context == NULL) {
        NSLog(@"STR_OPENAL ERROR - failed to create context");
        return;
    }

    // activate this new context — alcMakeContextCurrent can fail, so check it
    if (alcMakeContextCurrent(openal_context) == ALC_FALSE) {
        NSLog(@"STR_OPENAL ERROR - failed to make context current");
        return;
    }

    alGetError();   // clear any stale error state before generating AL objects

    alGenBuffers(MAX_OPENAL_QUEUE_BUFFERS, available_AL_buffer_array);  // allocate the buffer array to given number of buffers

    alGenSources(1, & streaming_source);

    // buffer/source generation can fail (e.g. out of resources) — detect it here
    // instead of letting the stale error surface later in inner_run
    ALenum al_error = alGetError();
    if (al_error != AL_NO_ERROR) {
        NSLog(@"STR_OPENAL ERROR - alGenBuffers/alGenSources error: %s", alGetString(al_error));
        return;
    }

    printf("STR_OPENAL  streaming_source starts with %u\n", streaming_source);

    printf("STR_OPENAL  initialization of available_AL_buffer_array_curr_index to 0\n");

    available_AL_buffer_array_curr_index = 0;   // pool index: 0 means every buffer is available

    self.previous_local_lpcm_buffer = 0;        // no LPCM chunk has been consumed yet

    [self get_next_buffer];                     // prime the pipeline with the first chunk

}   //  init_openal



// calling init and retrieving buffers logic left out goes here 

// Core streaming loop body: recycles OpenAL buffers the source has finished
// playing, refills one buffer with the next LPCM chunk, (re)starts playback if
// the source stalled (buffer underrun), and tells the parent when the final
// chunk has been rendered.
// NOTE(review): relies on state set up elsewhere (streaming_source,
// available_AL_buffer_array, local_lpcm_buffer, local_aac_index, ...) —
// presumably called repeatedly from a dedicated playback thread/run loop;
// confirm against the caller.
-(void) inner_run {

ALenum al_error;

// UN queue used buffers

ALint buffers_processed = 0;

alGetSourcei(streaming_source, AL_BUFFERS_PROCESSED, & buffers_processed);   // get source parameter num used buffs

while (buffers_processed > 0) {     // we have a consumed buffer so we need to replenish 

    NSLog(@"STR_OPENAL inner_run seeing consumed buffer");

    ALuint unqueued_buffer;

    alSourceUnqueueBuffers(streaming_source, 1, & unqueued_buffer);

    // about to decrement available_AL_buffer_array_curr_index 

    // the index doubles as the count of buffers currently queued on the
    // source: decrementing returns the unqueued buffer to the free pool
    available_AL_buffer_array_curr_index--;

    printf("STR_OPENAL   to NEW %d  with unqueued_buffer %d\n",
           available_AL_buffer_array_curr_index,
           unqueued_buffer);

    available_AL_buffer_array[available_AL_buffer_array_curr_index] = unqueued_buffer;

    buffers_processed--;
}

// queue UP fresh buffers

// index == MAX means every pool buffer is already queued on the source;
// sleep briefly to let playback drain before trying again
if (available_AL_buffer_array_curr_index >= MAX_OPENAL_QUEUE_BUFFERS) {

    printf("STR_OPENAL about to sleep since internal OpenAL queue is full\n");

    [NSThread sleepUntilDate:[NSDate dateWithTimeIntervalSinceNow: SLEEP_ON_OPENAL_QUEUE_FULL]];

} else {

    NSLog(@"STR_OPENAL YYYYYYYYY available_AL_buffer_array_curr_index %d    MAX_OPENAL_QUEUE_BUFFERS %d",
          available_AL_buffer_array_curr_index,
          MAX_OPENAL_QUEUE_BUFFERS
          );

    // take the next free buffer from the pool (index points at the first
    // available slot)
    ALuint curr_audio_buffer = available_AL_buffer_array[available_AL_buffer_array_curr_index];

    ALsizei size_buff;
    ALenum data_format;
    ALsizei sample_rate;

    // NOTE(review): assumes every LPCM chunk fills the full circular buffer;
    // a short final chunk would upload trailing garbage — confirm producer side
    size_buff = MAX_SIZE_CIRCULAR_BUFFER;   // works nicely with 1016064

    sample_rate = lpcm_output_sampling_frequency;
    data_format = AL_FORMAT_STEREO16;     // AL_FORMAT_STEREO16 == 4355 ( 0x1103 )  ---  AL_FORMAT_MONO16

    printf("STR_OPENAL  curr_audio_buffer is %u    data_format %u    size_buff %u\n",
           curr_audio_buffer,
           data_format,
           size_buff
           );


    // write_output_file([TS_ONLY_delete_this_var_temp_aif_fullpath
    // cStringUsingEncoding:NSUTF8StringEncoding], curr_lpcm_buffer, 
    // curr_lpcm_buffer_sizeof);


    // same pointer as last time means the producer has not delivered a new
    // chunk yet — skip the upload instead of re-queueing stale audio
    if (self.local_lpcm_buffer == self.previous_local_lpcm_buffer) {

        printf("STR_OPENAL NOTICE - need to throttle up openal sleep duration seeing same value for local_lpcm_buffer %d - so will skip loading into alBufferData\n",
               (int) self.local_lpcm_buffer);

    } else {


        NSLog(@"STR_OPENAL  about to call alBufferData curr_audio_buffer %d local_lpcm_buffer address %d local_aac_index %d",
              curr_audio_buffer,
              (int) self.local_lpcm_buffer,
              self.local_aac_index);

        // copy audio data into curr_buffer

        alBufferData(curr_audio_buffer, data_format, self.local_lpcm_buffer, size_buff, sample_rate); // curr_audio_buffer is an INT index determining which buffer to use

        // remember which chunk we consumed so the duplicate check above works
        self.previous_local_lpcm_buffer = self.local_lpcm_buffer;

        alSourceQueueBuffers(streaming_source, 1, & curr_audio_buffer);

        printf("STR_OPENAL  about to increment available_AL_buffer_array_curr_index from OLD %d",
           available_AL_buffer_array_curr_index);

        available_AL_buffer_array_curr_index++;

        printf("STR_OPENAL  available_AL_buffer_array_curr_index to NEW %d\n", available_AL_buffer_array_curr_index);
    }

    // checks the accumulated error from the alBufferData/alSourceQueueBuffers
    // calls above (no-op path sets no error)
    al_error = alGetError();
    if(AL_NO_ERROR != al_error)
    {
        NSLog(@"STR_OPENAL ERROR - alSourceQueueBuffers error: %s", alGetString(al_error));
        return;
    }

    ALenum current_playing_state;

    alGetSourcei(streaming_source, AL_SOURCE_STATE, & current_playing_state); // get source parameter STATE

    al_error = alGetError();
    if(AL_NO_ERROR != al_error)
    {
        NSLog(@"STR_OPENAL ERROR - alGetSourcei error: %s", alGetString(al_error));
        return;
    }

    // source stops by itself on buffer underrun — restart it if we still have
    // queued data and playback is not deliberately paused
    if (AL_PLAYING != current_playing_state) {

        ALint buffers_queued = 0;

        alGetSourcei(streaming_source, AL_BUFFERS_QUEUED, & buffers_queued); // get source parameter num queued buffs

        NSLog(@"STR_OPENAL NOTICE - play is NOT AL_PLAYING: %x, buffers_queued: %d", current_playing_state, buffers_queued);

        if (buffers_queued > 0 && NO == self.streaming_paused) {

            // restart play

            NSLog(@"STR_OPENAL about to restart play");

            alSourcePlay(streaming_source);

            al_error = alGetError();
            if (AL_NO_ERROR != al_error) {

                NSLog(@"STR_OPENAL ERROR - alSourcePlay error: %s", alGetString(al_error));
            }
        }
    }


    // end-of-stream: the producer has delivered its last chunk AND the source
    // has drained every queued buffer back into the pool
    if (self.last_aac_index == self.local_aac_index && available_AL_buffer_array_curr_index == 0) {

        NSLog(@"STR_OPENAL reached end of event tell parent");

        [self send_running_condition_message_to_parent: rendered_last_buffer];

        flag_continue_running = false; // terminate since all rendering work is done

    } else {

        [self get_next_buffer];
    }
}
}     //      inner_run

      

0


source


I think there is no out-of-the-box iOS solution for this. Dive into the Core Audio framework, or use a ready-made library such as StreamingKit.



0


source







All Articles