diff --git a/multimedia/01_day/test-audio/Android.mk b/multimedia/01_day/test-audio/Android.mk
new file mode 100644
index 0000000..fa1148b
--- /dev/null
+++ b/multimedia/01_day/test-audio/Android.mk
@@ -0,0 +1,17 @@
+LOCAL_PATH:= $(call my-dir)
+
+include $(CLEAR_VARS)
+LOCAL_SHARED_LIBRARIES := liblog libutils libmedia
+LOCAL_SRC_FILES := my_audio_play.cpp
+LOCAL_MODULE := my_audio_play
+include $(BUILD_EXECUTABLE)
+
+
+include $(CLEAR_VARS)
+LOCAL_SHARED_LIBRARIES := liblog libutils libmedia
+LOCAL_SRC_FILES := my_audio_record.cpp
+LOCAL_MODULE := my_audio_record
+include $(BUILD_EXECUTABLE)
+
+
+
diff --git a/multimedia/01_day/test-audio/a.wav b/multimedia/01_day/test-audio/a.wav
new file mode 100644
index 0000000..52dcdaa
Binary files /dev/null and b/multimedia/01_day/test-audio/a.wav differ
diff --git a/multimedia/01_day/test-audio/my_audio_play.cpp b/multimedia/01_day/test-audio/my_audio_play.cpp
new file mode 100644
index 0000000..7efdd2c
--- /dev/null
+++ b/multimedia/01_day/test-audio/my_audio_play.cpp
@@ -0,0 +1,98 @@
+#include
+#include
+#include
+#include
+
+using namespace android;
+
+#define ID_RIFF 0x46464952
+#define ID_WAVE 0x45564157
+#define ID_FMT 0x20746d66
+#define ID_DATA 0x61746164
+
+struct riff_wave_header {
+    uint32_t riff_id;
+    uint32_t riff_sz;
+    uint32_t wave_id;
+};
+
+struct chunk_header {
+    uint32_t id;
+    uint32_t sz;
+};
+
+struct chunk_fmt {
+    uint16_t audio_format;
+    uint16_t num_channels;
+    uint32_t sample_rate;
+    uint32_t byte_rate;
+    uint16_t block_align;
+    uint16_t bits_per_sample;
+};
+// ./myapp a.wav    // argv[1]
+int main(int argc, char **argv)
+{
+    FILE *file;
+    struct riff_wave_header riff_wave_header;
+    struct chunk_header chunk_header;
+    struct chunk_fmt chunk_fmt;
+    char *filename;
+    int more_chunks = 1;
+    char buff[4096];
+    int ret;
+
+    filename = argv[1];
+    file = fopen(filename, "rb");
+
+    fread(&riff_wave_header, sizeof(riff_wave_header), 1, file);
+    if ((riff_wave_header.riff_id != ID_RIFF) ||
+        (riff_wave_header.wave_id != ID_WAVE)) {
+        fprintf(stderr, "Error: '%s' is not a riff/wave file\n", filename);
+        fclose(file);
+        return 1;
+    }
+
+    do {
+        fread(&chunk_header, sizeof(chunk_header), 1, file);
+
+        switch (chunk_header.id) {
+        case ID_FMT:
+            fread(&chunk_fmt, sizeof(chunk_fmt), 1, file);
+            /* If the format header is larger, skip the rest */
+            if (chunk_header.sz > sizeof(chunk_fmt))
+                fseek(file, chunk_header.sz - sizeof(chunk_fmt), SEEK_CUR);
+            break;
+        case ID_DATA:
+            /* Stop looking for chunks */
+            more_chunks = 0;
+            break;
+        default:
+            /* Unknown chunk, skip bytes */
+            fseek(file, chunk_header.sz, SEEK_CUR);
+        }
+    } while (more_chunks);
+
+    sp track = new AudioTrack(AUDIO_STREAM_MUSIC,// stream type
+                              44100,
+                              AUDIO_FORMAT_PCM_16_BIT,// word length, PCM
+                              AUDIO_CHANNEL_OUT_STEREO,
+                              0);
+
+    status_t status = track->initCheck();
+    if(status != NO_ERROR) {
+        track.clear();
+        printf("Failed for initCheck()\n");
+        return -1;
+    }
+
+    // creates the PlaybackThread track (createTrack), shares the shared memory
+    track->start(); // Track => ActiveTrack
+    while( ret = fread( buff, 1, sizeof buff, file ))
+        track->write( buff, ret ); // send the audio data
+
+    fclose(file);
+    return 0;
+}
+
+
diff --git a/multimedia/01_day/test-audio/my_audio_record.cpp b/multimedia/01_day/test-audio/my_audio_record.cpp
new file mode 100644
index 0000000..8149c86
--- /dev/null
+++ b/multimedia/01_day/test-audio/my_audio_record.cpp
@@ -0,0 +1,156 @@
+#include
+#include
+#include
+#include
+#include
+#include
+
+using namespace android;
+
+#define ID_RIFF 0x46464952
+#define ID_WAVE 0x45564157
+#define
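// Note on the player above: the sp<> template arguments were lost in this paste
// ("sp track" is presumably sp<AudioTrack>), and the AudioTrack is created with a
// hard-coded 44100 Hz / 16-bit / stereo configuration even though the fmt chunk has
// just been parsed. A minimal sketch of deriving the arguments from chunk_fmt
// instead (same 5-argument constructor used above; audio_channel_out_mask_from_count()
// and the 8-bit fallback are assumptions, not part of the original file):
static sp<AudioTrack> trackFromFmt(const struct chunk_fmt &fmt)
{
    audio_format_t pcm = (fmt.bits_per_sample == 8) ? AUDIO_FORMAT_PCM_8_BIT
                                                    : AUDIO_FORMAT_PCM_16_BIT;
    return new AudioTrack(AUDIO_STREAM_MUSIC,                          // stream type
                          fmt.sample_rate,                             // from the WAV file
                          pcm,                                         // word length, PCM
                          audio_channel_out_mask_from_count(fmt.num_channels),
                          0 /* frameCount: let AudioFlinger pick the minimum */);
}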
ID_FMT 0x20746d66 +#define ID_DATA 0x61746164 + +#define FORMAT_PCM 1 + +struct wav_header { + uint32_t riff_id; + uint32_t riff_sz; + uint32_t riff_fmt; + uint32_t fmt_id; + uint32_t fmt_sz; + uint16_t audio_format; + uint16_t num_channels; + uint32_t sample_rate; + uint32_t byte_rate; + uint16_t block_align; + uint16_t bits_per_sample; + uint32_t data_id; + uint32_t data_sz; +}; + +int capturing = 1; + +unsigned int capture_sample(FILE *file, + unsigned int channels, + unsigned int rate, + unsigned int period_size, + unsigned int period_count); + +void sigint_handler(int sig) +{ + printf("sigint_handler(%d)\n", sig ); + capturing = 0; +} + +int main(int argc, char **argv) +{ + FILE *file; + struct wav_header header; + unsigned int channels = 2; + unsigned int rate = 44100; + unsigned int bits = 16; + unsigned int frames; + unsigned int period_size = 1024; + unsigned int period_count = 4; + + printf("argc=%d\n", argc ); + file = fopen(argv[1], "wb"); + if (!file) { + fprintf(stderr, "Unable to create file '%s'\n", argv[1]); + return 1; + } + + header.riff_id = ID_RIFF; + header.riff_sz = 0; + header.riff_fmt = ID_WAVE; + header.fmt_id = ID_FMT; + header.fmt_sz = 16; + header.audio_format = FORMAT_PCM; + header.num_channels = channels; + header.sample_rate = rate; + + header.bits_per_sample = 16; + header.byte_rate = (header.bits_per_sample / 8) * channels * rate; + header.block_align = channels * (header.bits_per_sample / 8); + header.data_id = ID_DATA; + + fseek(file, sizeof(struct wav_header), SEEK_SET); + + signal(SIGINT, sigint_handler); + frames = capture_sample(file, + header.num_channels, + header.sample_rate, + period_size, + period_count); + printf("Captured %d frames\n", frames); + + header.data_sz = frames * header.block_align; + header.riff_sz = header.data_sz + sizeof(header) - 8; + fseek(file, 0, SEEK_SET); + fwrite(&header, sizeof(struct wav_header), 1, file); + + fclose(file); + + return 0; +} + +unsigned int capture_sample(FILE *file, + unsigned int channels, + unsigned int rate, + unsigned int period_size, + unsigned int period_count) +{ + char *buffer; + unsigned int size; + unsigned int bytes_read=0; + int kMaxBufferSize = 2048; + size_t minFrameCount; + status_t status = AudioRecord::getMinFrameCount(&minFrameCount, + rate, + AUDIO_FORMAT_PCM_16_BIT, + audio_channel_in_mask_from_count(channels)); + if (status == OK) { + uint32_t frameCount = kMaxBufferSize / sizeof(int16_t) / channels; + + size_t bufCount = 2; + while ((bufCount * frameCount) < minFrameCount) { + bufCount++; + } + + sp record = new AudioRecord( + AUDIO_SOURCE_DEFAULT, rate, AUDIO_FORMAT_PCM_16_BIT, + audio_channel_in_mask_from_count(channels), + (size_t) (bufCount * frameCount) + ); + status = record->initCheck(); + if(status != NO_ERROR) { + record.clear(); + printf("Failed for initCheck()"); + return 0; + } + + + printf("Capturing sample: %u ch, %u hz, %u bit\n", channels, rate, + 16); + + size = period_count * period_size * channels * 2; + buffer = (char*)malloc(size); + + record->start(); + + while (capturing) { + size = record->read( buffer, size); + if (fwrite(buffer, 1, size, file) != size) { + fprintf(stderr,"Error capturing sample\n"); + break; + } + bytes_read += size; + } + + free(buffer); + } + return bytes_read/4; +} + diff --git a/multimedia/01_day/test-ndk/jni/Android.mk b/multimedia/01_day/test-ndk/jni/Android.mk new file mode 100644 index 0000000..6603245 --- /dev/null +++ b/multimedia/01_day/test-ndk/jni/Android.mk @@ -0,0 +1,8 @@ +LOCAL_PATH := $(call my-dir) + +include 
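A quick worked example of the header arithmetic in my_audio_record.cpp above: with 2 channels at 16 bits, block_align = 2 * (16/8) = 4 bytes per frame, which is also why capture_sample() reports frames as bytes_read/4; data_sz is then frames * 4, and riff_sz = data_sz + sizeof(header) - 8 = data_sz + 36 for the canonical 44-byte header, i.e. everything that follows the 8-byte "RIFF"+size prefix. The same bookkeeping for an arbitrary PCM format, as a hypothetical helper that is not part of the diff:

static void finish_wav_header(struct wav_header *h, unsigned int frames,
                              unsigned int channels, unsigned int bits)
{
    unsigned int frame_bytes = channels * (bits / 8);    /* 2 ch x 16 bit -> 4 bytes/frame */
    h->data_sz = frames * frame_bytes;                   /* raw PCM payload                */
    h->riff_sz = h->data_sz + sizeof(*h) - 8;            /* 44-byte header -> data_sz + 36 */
}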
$(CLEAR_VARS) +LOCAL_CFLAGS += -fPIE +LOCAL_LDFLAGS += -fPIE -pie +LOCAL_MODULE := myapp +LOCAL_SRC_FILES := myapp.c +include $(BUILD_EXECUTABLE) diff --git a/multimedia/01_day/test-ndk/jni/myapp.c b/multimedia/01_day/test-ndk/jni/myapp.c new file mode 100644 index 0000000..1e81937 --- /dev/null +++ b/multimedia/01_day/test-ndk/jni/myapp.c @@ -0,0 +1,8 @@ +#include + +int main() +{ + printf("Hello Android\n"); + return 0; +} + diff --git a/multimedia/01_day/test-pdk/Android.mk b/multimedia/01_day/test-pdk/Android.mk new file mode 100644 index 0000000..b69d1e4 --- /dev/null +++ b/multimedia/01_day/test-pdk/Android.mk @@ -0,0 +1,6 @@ +LOCAL_PATH := $(call my-dir) + +include $(CLEAR_VARS) +LOCAL_MODULE := myapp +LOCAL_SRC_FILES := myapp.c +include $(BUILD_EXECUTABLE) diff --git a/multimedia/01_day/test-pdk/myapp.c b/multimedia/01_day/test-pdk/myapp.c new file mode 100644 index 0000000..1e81937 --- /dev/null +++ b/multimedia/01_day/test-pdk/myapp.c @@ -0,0 +1,8 @@ +#include + +int main() +{ + printf("Hello Android\n"); + return 0; +} + diff --git a/multimedia/02_day/b.wav b/multimedia/02_day/b.wav new file mode 100644 index 0000000..2ae6515 Binary files /dev/null and b/multimedia/02_day/b.wav differ diff --git a/multimedia/02_day/test-HAL/atoi.c b/multimedia/02_day/test-HAL/atoi.c new file mode 100644 index 0000000..f763e79 --- /dev/null +++ b/multimedia/02_day/test-HAL/atoi.c @@ -0,0 +1,20 @@ +#include "main.h" + + +int my_atoi(char *buff) +{ + int i, sum=0; + for(i=0; buff[i]; i++ ) + sum = sum*10 + buff[i] - '0'; + + return sum; +} + + +int my_add(int a, int b ) +{ + return a+b; +} + + +struct _HMI HMI = { my_atoi, my_add, 10 }; diff --git a/multimedia/02_day/test-HAL/main.c b/multimedia/02_day/test-HAL/main.c new file mode 100644 index 0000000..0a6c4ca --- /dev/null +++ b/multimedia/02_day/test-HAL/main.c @@ -0,0 +1,26 @@ +#include +#include + +int main() +{ + int *global; + int (*func)(char*); + int (*add)(int,int); + int data; + void *handle = dlopen("libatoi.so", RTLD_LAZY); + + func = dlsym( handle, "my_atoi" ); + data = func("123"); + printf("data=%d\n", data ); + + add = dlsym( handle, "my_add" ); + data = add(1,2); + printf("data=%d\n", data ); + + *global = dlsym( handle, "global" ); + data = add(1,2); + printf("data=%d\n", data ); + dlclose(handle); +} + + diff --git a/multimedia/02_day/test-HAL/main.h b/multimedia/02_day/test-HAL/main.h new file mode 100644 index 0000000..36f6400 --- /dev/null +++ b/multimedia/02_day/test-HAL/main.h @@ -0,0 +1,10 @@ + +int my_atoi(char *buff); +int my_add(int a, int b); + +struct _HMI +{ + int (*atoi)(char *buff); + int (*add)(int , int ); + int global; +}; diff --git a/multimedia/02_day/test-ashmem1/Android.mk b/multimedia/02_day/test-ashmem1/Android.mk new file mode 100644 index 0000000..67392dc --- /dev/null +++ b/multimedia/02_day/test-ashmem1/Android.mk @@ -0,0 +1,14 @@ +LOCAL_PATH:= $(call my-dir) +include $(CLEAR_VARS) +LOCAL_SRC_FILES:= server.cpp +LOCAL_MODULE := my_server +LOCAL_SHARED_LIBRARIES:= libcutils libutils libbinder +LOCAL_MODULE_TAGS := optional +include $(BUILD_EXECUTABLE) + +include $(CLEAR_VARS) +LOCAL_SRC_FILES:= client.cpp +LOCAL_MODULE := my_client +LOCAL_SHARED_LIBRARIES:= libcutils libutils libbinder +LOCAL_MODULE_TAGS := optional +include $(BUILD_EXECUTABLE) diff --git a/multimedia/02_day/test-ashmem1/client.cpp b/multimedia/02_day/test-ashmem1/client.cpp new file mode 100644 index 0000000..d121363 --- /dev/null +++ b/multimedia/02_day/test-ashmem1/client.cpp @@ -0,0 +1,17 @@ +#include +#include +#include +#include 
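One thing worth flagging in test-HAL/main.c above: `*global = dlsym(handle, "global")` writes through an uninitialized pointer, and atoi.c does not define a variable named "global" anyway; what the library exports is the jump table `struct _HMI HMI`. A corrected sketch of the intended HAL-style lookup (assuming libatoi.so is built from atoi.c/main.h; the error handling is mine):

#include <stdio.h>
#include <dlfcn.h>
#include "main.h"

int main(void)
{
    void *handle = dlopen("libatoi.so", RTLD_LAZY);
    if (!handle) { fprintf(stderr, "%s\n", dlerror()); return 1; }

    /* look up the whole operations table instead of individual symbols */
    struct _HMI *hmi = (struct _HMI *)dlsym(handle, "HMI");
    if (hmi) {
        printf("atoi(\"123\") = %d\n", hmi->atoi("123"));
        printf("add(1,2)      = %d\n", hmi->add(1, 2));
        printf("global        = %d\n", hmi->global);   /* plain data member, not a function */
    }
    dlclose(handle);
    return 0;
}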
+#include + +using namespace android; + +int main() +{ + sp sm = defaultServiceManager(); + sp binder = sm->getService( String16("my.ashmem1") ); + sp heap = interface_cast(binder); + char *p = (char*)heap->getBase(); + printf("[%s]\n", p ); + return 0; +} diff --git a/multimedia/02_day/test-ashmem1/server.cpp b/multimedia/02_day/test-ashmem1/server.cpp new file mode 100644 index 0000000..c8957e3 --- /dev/null +++ b/multimedia/02_day/test-ashmem1/server.cpp @@ -0,0 +1,18 @@ +#include +#include +#include +#include + +using namespace android; + +int main() +{ + sp sm = defaultServiceManager(); + sp heap = new MemoryHeapBase(4096); + sm->addService( String16("my.ashmem1") , heap ); + + char *p = (char*)heap->getBase(); + sprintf(p, "Hello Client!!\n" ); + IPCThreadState::self()->joinThreadPool(); + return 0; +} diff --git a/multimedia/02_day/test-ashmem2/.client.cpp.swp b/multimedia/02_day/test-ashmem2/.client.cpp.swp new file mode 100644 index 0000000..73aaa58 Binary files /dev/null and b/multimedia/02_day/test-ashmem2/.client.cpp.swp differ diff --git a/multimedia/02_day/test-ashmem2/Android.mk b/multimedia/02_day/test-ashmem2/Android.mk new file mode 100644 index 0000000..67392dc --- /dev/null +++ b/multimedia/02_day/test-ashmem2/Android.mk @@ -0,0 +1,14 @@ +LOCAL_PATH:= $(call my-dir) +include $(CLEAR_VARS) +LOCAL_SRC_FILES:= server.cpp +LOCAL_MODULE := my_server +LOCAL_SHARED_LIBRARIES:= libcutils libutils libbinder +LOCAL_MODULE_TAGS := optional +include $(BUILD_EXECUTABLE) + +include $(CLEAR_VARS) +LOCAL_SRC_FILES:= client.cpp +LOCAL_MODULE := my_client +LOCAL_SHARED_LIBRARIES:= libcutils libutils libbinder +LOCAL_MODULE_TAGS := optional +include $(BUILD_EXECUTABLE) diff --git a/multimedia/02_day/test-ashmem2/client.cpp b/multimedia/02_day/test-ashmem2/client.cpp new file mode 100644 index 0000000..5efdca6 --- /dev/null +++ b/multimedia/02_day/test-ashmem2/client.cpp @@ -0,0 +1,21 @@ +#include +#include +#include +#include +#include + +using namespace android; + +int main() +{ + sp sm = defaultServiceManager(); + sp binder = sm->getService( String16("my.ashmem1") ); + sp memory = interface_cast(binder); + ssize_t offset=0; + size_t size=0; + sp heap = memory->getMemory(&offset, &size); + + char *p = (char*)heap->getBase(); + printf("[%s]\n", p+offset ); + return 0; +} diff --git a/multimedia/02_day/test-ashmem2/server.cpp b/multimedia/02_day/test-ashmem2/server.cpp new file mode 100644 index 0000000..07bd830 --- /dev/null +++ b/multimedia/02_day/test-ashmem2/server.cpp @@ -0,0 +1,19 @@ +#include +#include +#include +#include +#include + +using namespace android; + +int main() +{ + sp sm = defaultServiceManager(); + sp heap = new MemoryHeapBase(4096); + sm->addService( String16("my.ashmem1") , new MemoryBase(heap, 100, 100) ); + + char *p = (char*)heap->getBase(); + sprintf(p+100, "Hello Client!!\n" ); + IPCThreadState::self()->joinThreadPool(); + return 0; +} diff --git a/multimedia/02_day/test-mixer/Android.mk b/multimedia/02_day/test-mixer/Android.mk new file mode 100644 index 0000000..11a2eb8 --- /dev/null +++ b/multimedia/02_day/test-mixer/Android.mk @@ -0,0 +1,36 @@ +LOCAL_PATH:= $(call my-dir) + +include $(CLEAR_VARS) + +LOCAL_SRC_FILES:= \ + test-mixer.cpp \ + AudioMixer.cpp.arm \ + +LOCAL_C_INCLUDES := \ + bionic \ + bionic/libstdc++/include \ + external/stlport/stlport \ + $(call include-path-for, audio-effects) \ + $(call include-path-for, audio-utils) \ + frameworks/av/services/audioflinger + +LOCAL_STATIC_LIBRARIES := \ + libsndfile + +LOCAL_SHARED_LIBRARIES := \ + 
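The ashmem examples above lost their template arguments in this paste: sp, getService and interface_cast are all templates, so the client lines are presumably sp<IServiceManager>, sp<IBinder>, and interface_cast<IMemoryHeap>(...) / interface_cast<IMemory>(...). A client-side sketch of the test-ashmem2 flow with those assumed types restored:

sp<IServiceManager> sm     = defaultServiceManager();
sp<IBinder>         binder = sm->getService(String16("my.ashmem1"));
sp<IMemory>         memory = interface_cast<IMemory>(binder);    // server registered a MemoryBase

ssize_t offset = 0;
size_t  size   = 0;
sp<IMemoryHeap> heap = memory->getMemory(&offset, &size);        // whole heap plus this window
printf("[%s]\n", (char *)heap->getBase() + offset);              // server wrote at offset 100

The point of the MemoryBase(heap, 100, 100) wrapper on the server side is exactly that offset/size pair: the client maps the full 4096-byte MemoryHeapBase once, and the IMemory object tells it which 100-byte window inside it the service intends to share.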
libstlport \ + libeffects \ + libnbaio \ + libcommon_time_client \ + libaudioresampler \ + libaudioutils \ + libdl \ + libcutils \ + libutils \ + liblog + +LOCAL_MODULE:= test-mixer + +LOCAL_MODULE_TAGS := optional + +include $(BUILD_EXECUTABLE) diff --git a/multimedia/02_day/test-mixer/AudioMixer.cpp b/multimedia/02_day/test-mixer/AudioMixer.cpp new file mode 100644 index 0000000..fd28ea1 --- /dev/null +++ b/multimedia/02_day/test-mixer/AudioMixer.cpp @@ -0,0 +1,2239 @@ +/* +** +** Copyright 2007, The Android Open Source Project +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +*/ + +#define LOG_TAG "AudioMixer" +//#define LOG_NDEBUG 0 + +#include "Configuration.h" +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include + +#include + +#include +#include +#include +#include + +#include +#include + +#include "AudioMixerOps.h" +#include "AudioMixer.h" + +// The FCC_2 macro refers to the Fixed Channel Count of 2 for the legacy integer mixer. +#ifndef FCC_2 +#define FCC_2 2 +#endif + +// Look for MONO_HACK for any Mono hack involving legacy mono channel to +// stereo channel conversion. + +/* VERY_VERY_VERBOSE_LOGGING will show exactly which process hook and track hook is + * being used. This is a considerable amount of log spam, so don't enable unless you + * are verifying the hook based code. + */ +//#define VERY_VERY_VERBOSE_LOGGING +#ifdef VERY_VERY_VERBOSE_LOGGING +#define ALOGVV ALOGV +//define ALOGVV printf // for test-mixer.cpp +#else +#define ALOGVV(a...) do { } while (0) +#endif + +#ifndef ARRAY_SIZE +#define ARRAY_SIZE(x) (sizeof(x)/sizeof((x)[0])) +#endif + +// Set kUseNewMixer to true to use the new mixer engine. Otherwise the +// original code will be used. This is false for now. +static const bool kUseNewMixer = false; + +// Set kUseFloat to true to allow floating input into the mixer engine. +// If kUseNewMixer is false, this is ignored or may be overridden internally +// because of downmix/upmix support. +static const bool kUseFloat = true; + +// Set to default copy buffer size in frames for input processing. +static const size_t kCopyBufferFrameCount = 256; + +namespace android { + +// ---------------------------------------------------------------------------- + +template +T min(const T& a, const T& b) +{ + return a < b ? 
a : b; +} + +AudioMixer::CopyBufferProvider::CopyBufferProvider(size_t inputFrameSize, + size_t outputFrameSize, size_t bufferFrameCount) : + mInputFrameSize(inputFrameSize), + mOutputFrameSize(outputFrameSize), + mLocalBufferFrameCount(bufferFrameCount), + mLocalBufferData(NULL), + mConsumed(0) +{ + ALOGV("CopyBufferProvider(%p)(%zu, %zu, %zu)", this, + inputFrameSize, outputFrameSize, bufferFrameCount); + LOG_ALWAYS_FATAL_IF(inputFrameSize < outputFrameSize && bufferFrameCount == 0, + "Requires local buffer if inputFrameSize(%zu) < outputFrameSize(%zu)", + inputFrameSize, outputFrameSize); + if (mLocalBufferFrameCount) { + (void)posix_memalign(&mLocalBufferData, 32, mLocalBufferFrameCount * mOutputFrameSize); + } + mBuffer.frameCount = 0; +} + +AudioMixer::CopyBufferProvider::~CopyBufferProvider() +{ + ALOGV("~CopyBufferProvider(%p)", this); + if (mBuffer.frameCount != 0) { + mTrackBufferProvider->releaseBuffer(&mBuffer); + } + free(mLocalBufferData); +} + +status_t AudioMixer::CopyBufferProvider::getNextBuffer(AudioBufferProvider::Buffer *pBuffer, + int64_t pts) +{ + //ALOGV("CopyBufferProvider(%p)::getNextBuffer(%p (%zu), %lld)", + // this, pBuffer, pBuffer->frameCount, pts); + if (mLocalBufferFrameCount == 0) { + status_t res = mTrackBufferProvider->getNextBuffer(pBuffer, pts); + if (res == OK) { + copyFrames(pBuffer->raw, pBuffer->raw, pBuffer->frameCount); + } + return res; + } + if (mBuffer.frameCount == 0) { + mBuffer.frameCount = pBuffer->frameCount; + status_t res = mTrackBufferProvider->getNextBuffer(&mBuffer, pts); + // At one time an upstream buffer provider had + // res == OK and mBuffer.frameCount == 0, doesn't seem to happen now 7/18/2014. + // + // By API spec, if res != OK, then mBuffer.frameCount == 0. + // but there may be improper implementations. + ALOG_ASSERT(res == OK || mBuffer.frameCount == 0); + if (res != OK || mBuffer.frameCount == 0) { // not needed by API spec, but to be safe. 
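            // (How this adapter is used, for orientation: CopyBufferProvider wraps the
            //  track's real AudioBufferProvider (mTrackBufferProvider) and subclasses
            //  only implement copyFrames(); with mLocalBufferFrameCount == 0 it
            //  converts in place over the upstream buffer, otherwise it stages data
            //  through mLocalBufferData in chunks of at most mLocalBufferFrameCount
            //  frames. A subclass can be as small as this sketch, which assumes
            //  16-bit stereo frames and is not part of the original file:
            //
            //    class HalfGainProvider : public AudioMixer::CopyBufferProvider {
            //    public:
            //        HalfGainProvider(size_t frameSize, size_t frames)
            //            : CopyBufferProvider(frameSize, frameSize, frames) {}
            //        virtual void copyFrames(void *dst, const void *src, size_t frames) {
            //            const int16_t *in = (const int16_t *)src;
            //            int16_t *out = (int16_t *)dst;
            //            for (size_t i = 0; i < frames * 2; i++) out[i] = in[i] >> 1;
            //        }
            //    };
            //  )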
+ pBuffer->raw = NULL; + pBuffer->frameCount = 0; + return res; + } + mConsumed = 0; + } + ALOG_ASSERT(mConsumed < mBuffer.frameCount); + size_t count = min(mLocalBufferFrameCount, mBuffer.frameCount - mConsumed); + count = min(count, pBuffer->frameCount); + pBuffer->raw = mLocalBufferData; + pBuffer->frameCount = count; + copyFrames(pBuffer->raw, (uint8_t*)mBuffer.raw + mConsumed * mInputFrameSize, + pBuffer->frameCount); + return OK; +} + +void AudioMixer::CopyBufferProvider::releaseBuffer(AudioBufferProvider::Buffer *pBuffer) +{ + //ALOGV("CopyBufferProvider(%p)::releaseBuffer(%p(%zu))", + // this, pBuffer, pBuffer->frameCount); + if (mLocalBufferFrameCount == 0) { + mTrackBufferProvider->releaseBuffer(pBuffer); + return; + } + // LOG_ALWAYS_FATAL_IF(pBuffer->frameCount == 0, "Invalid framecount"); + mConsumed += pBuffer->frameCount; // TODO: update for efficiency to reuse existing content + if (mConsumed != 0 && mConsumed >= mBuffer.frameCount) { + mTrackBufferProvider->releaseBuffer(&mBuffer); + ALOG_ASSERT(mBuffer.frameCount == 0); + } + pBuffer->raw = NULL; + pBuffer->frameCount = 0; +} + +void AudioMixer::CopyBufferProvider::reset() +{ + if (mBuffer.frameCount != 0) { + mTrackBufferProvider->releaseBuffer(&mBuffer); + } + mConsumed = 0; +} + +AudioMixer::DownmixerBufferProvider::DownmixerBufferProvider( + audio_channel_mask_t inputChannelMask, + audio_channel_mask_t outputChannelMask, audio_format_t format, + uint32_t sampleRate, int32_t sessionId, size_t bufferFrameCount) : + CopyBufferProvider( + audio_bytes_per_sample(format) * audio_channel_count_from_out_mask(inputChannelMask), + audio_bytes_per_sample(format) * audio_channel_count_from_out_mask(outputChannelMask), + bufferFrameCount) // set bufferFrameCount to 0 to do in-place +{ + ALOGV("DownmixerBufferProvider(%p)(%#x, %#x, %#x %u %d)", + this, inputChannelMask, outputChannelMask, format, + sampleRate, sessionId); + if (!sIsMultichannelCapable + || EffectCreate(&sDwnmFxDesc.uuid, + sessionId, + SESSION_ID_INVALID_AND_IGNORED, + &mDownmixHandle) != 0) { + ALOGE("DownmixerBufferProvider() error creating downmixer effect"); + mDownmixHandle = NULL; + return; + } + // channel input configuration will be overridden per-track + mDownmixConfig.inputCfg.channels = inputChannelMask; // FIXME: Should be bits + mDownmixConfig.outputCfg.channels = outputChannelMask; // FIXME: should be bits + mDownmixConfig.inputCfg.format = format; + mDownmixConfig.outputCfg.format = format; + mDownmixConfig.inputCfg.samplingRate = sampleRate; + mDownmixConfig.outputCfg.samplingRate = sampleRate; + mDownmixConfig.inputCfg.accessMode = EFFECT_BUFFER_ACCESS_READ; + mDownmixConfig.outputCfg.accessMode = EFFECT_BUFFER_ACCESS_WRITE; + // input and output buffer provider, and frame count will not be used as the downmix effect + // process() function is called directly (see DownmixerBufferProvider::getNextBuffer()) + mDownmixConfig.inputCfg.mask = EFFECT_CONFIG_SMP_RATE | EFFECT_CONFIG_CHANNELS | + EFFECT_CONFIG_FORMAT | EFFECT_CONFIG_ACC_MODE; + mDownmixConfig.outputCfg.mask = mDownmixConfig.inputCfg.mask; + + int cmdStatus; + uint32_t replySize = sizeof(int); + + // Configure downmixer + status_t status = (*mDownmixHandle)->command(mDownmixHandle, + EFFECT_CMD_SET_CONFIG /*cmdCode*/, sizeof(effect_config_t) /*cmdSize*/, + &mDownmixConfig /*pCmdData*/, + &replySize, &cmdStatus /*pReplyData*/); + if (status != 0 || cmdStatus != 0) { + ALOGE("DownmixerBufferProvider() error %d cmdStatus %d while configuring downmixer", + status, cmdStatus); + 
EffectRelease(mDownmixHandle); + mDownmixHandle = NULL; + return; + } + + // Enable downmixer + replySize = sizeof(int); + status = (*mDownmixHandle)->command(mDownmixHandle, + EFFECT_CMD_ENABLE /*cmdCode*/, 0 /*cmdSize*/, NULL /*pCmdData*/, + &replySize, &cmdStatus /*pReplyData*/); + if (status != 0 || cmdStatus != 0) { + ALOGE("DownmixerBufferProvider() error %d cmdStatus %d while enabling downmixer", + status, cmdStatus); + EffectRelease(mDownmixHandle); + mDownmixHandle = NULL; + return; + } + + // Set downmix type + // parameter size rounded for padding on 32bit boundary + const int psizePadded = ((sizeof(downmix_params_t) - 1)/sizeof(int) + 1) * sizeof(int); + const int downmixParamSize = + sizeof(effect_param_t) + psizePadded + sizeof(downmix_type_t); + effect_param_t * const param = (effect_param_t *) malloc(downmixParamSize); + param->psize = sizeof(downmix_params_t); + const downmix_params_t downmixParam = DOWNMIX_PARAM_TYPE; + memcpy(param->data, &downmixParam, param->psize); + const downmix_type_t downmixType = DOWNMIX_TYPE_FOLD; + param->vsize = sizeof(downmix_type_t); + memcpy(param->data + psizePadded, &downmixType, param->vsize); + replySize = sizeof(int); + status = (*mDownmixHandle)->command(mDownmixHandle, + EFFECT_CMD_SET_PARAM /* cmdCode */, downmixParamSize /* cmdSize */, + param /*pCmdData*/, &replySize, &cmdStatus /*pReplyData*/); + free(param); + if (status != 0 || cmdStatus != 0) { + ALOGE("DownmixerBufferProvider() error %d cmdStatus %d while setting downmix type", + status, cmdStatus); + EffectRelease(mDownmixHandle); + mDownmixHandle = NULL; + return; + } + ALOGV("DownmixerBufferProvider() downmix type set to %d", (int) downmixType); +} + +AudioMixer::DownmixerBufferProvider::~DownmixerBufferProvider() +{ + ALOGV("~DownmixerBufferProvider (%p)", this); + EffectRelease(mDownmixHandle); + mDownmixHandle = NULL; +} + +void AudioMixer::DownmixerBufferProvider::copyFrames(void *dst, const void *src, size_t frames) +{ + mDownmixConfig.inputCfg.buffer.frameCount = frames; + mDownmixConfig.inputCfg.buffer.raw = const_cast(src); + mDownmixConfig.outputCfg.buffer.frameCount = frames; + mDownmixConfig.outputCfg.buffer.raw = dst; + // may be in-place if src == dst. + status_t res = (*mDownmixHandle)->process(mDownmixHandle, + &mDownmixConfig.inputCfg.buffer, &mDownmixConfig.outputCfg.buffer); + ALOGE_IF(res != OK, "DownmixBufferProvider error %d", res); +} + +/* call once in a pthread_once handler. 
*/ +/*static*/ status_t AudioMixer::DownmixerBufferProvider::init() +{ + // find multichannel downmix effect if we have to play multichannel content + uint32_t numEffects = 0; + int ret = EffectQueryNumberEffects(&numEffects); + if (ret != 0) { + ALOGE("AudioMixer() error %d querying number of effects", ret); + return NO_INIT; + } + ALOGV("EffectQueryNumberEffects() numEffects=%d", numEffects); + + for (uint32_t i = 0 ; i < numEffects ; i++) { + if (EffectQueryEffect(i, &sDwnmFxDesc) == 0) { + ALOGV("effect %d is called %s", i, sDwnmFxDesc.name); + if (memcmp(&sDwnmFxDesc.type, EFFECT_UIID_DOWNMIX, sizeof(effect_uuid_t)) == 0) { + ALOGI("found effect \"%s\" from %s", + sDwnmFxDesc.name, sDwnmFxDesc.implementor); + sIsMultichannelCapable = true; + break; + } + } + } + ALOGW_IF(!sIsMultichannelCapable, "unable to find downmix effect"); + return NO_INIT; +} + +/*static*/ bool AudioMixer::DownmixerBufferProvider::sIsMultichannelCapable = false; +/*static*/ effect_descriptor_t AudioMixer::DownmixerBufferProvider::sDwnmFxDesc; + +AudioMixer::RemixBufferProvider::RemixBufferProvider(audio_channel_mask_t inputChannelMask, + audio_channel_mask_t outputChannelMask, audio_format_t format, + size_t bufferFrameCount) : + CopyBufferProvider( + audio_bytes_per_sample(format) + * audio_channel_count_from_out_mask(inputChannelMask), + audio_bytes_per_sample(format) + * audio_channel_count_from_out_mask(outputChannelMask), + bufferFrameCount), + mFormat(format), + mSampleSize(audio_bytes_per_sample(format)), + mInputChannels(audio_channel_count_from_out_mask(inputChannelMask)), + mOutputChannels(audio_channel_count_from_out_mask(outputChannelMask)) +{ + ALOGV("RemixBufferProvider(%p)(%#x, %#x, %#x) %zu %zu", + this, format, inputChannelMask, outputChannelMask, + mInputChannels, mOutputChannels); + // TODO: consider channel representation in index array formulation + // We ignore channel representation, and just use the bits. + memcpy_by_index_array_initialization(mIdxAry, ARRAY_SIZE(mIdxAry), + audio_channel_mask_get_bits(outputChannelMask), + audio_channel_mask_get_bits(inputChannelMask)); +} + +void AudioMixer::RemixBufferProvider::copyFrames(void *dst, const void *src, size_t frames) +{ + memcpy_by_index_array(dst, mOutputChannels, + src, mInputChannels, mIdxAry, mSampleSize, frames); +} + +AudioMixer::ReformatBufferProvider::ReformatBufferProvider(int32_t channels, + audio_format_t inputFormat, audio_format_t outputFormat, + size_t bufferFrameCount) : + CopyBufferProvider( + channels * audio_bytes_per_sample(inputFormat), + channels * audio_bytes_per_sample(outputFormat), + bufferFrameCount), + mChannels(channels), + mInputFormat(inputFormat), + mOutputFormat(outputFormat) +{ + ALOGV("ReformatBufferProvider(%p)(%d, %#x, %#x)", this, channels, inputFormat, outputFormat); +} + +void AudioMixer::ReformatBufferProvider::copyFrames(void *dst, const void *src, size_t frames) +{ + memcpy_by_audio_format(dst, mOutputFormat, src, mInputFormat, frames * mChannels); +} + +// ---------------------------------------------------------------------------- + +// Ensure mConfiguredNames bitmask is initialized properly on all architectures. +// The value of 1 << x is undefined in C when x >= 32. + +AudioMixer::AudioMixer(size_t frameCount, uint32_t sampleRate, uint32_t maxNumTracks) + : mTrackNames(0), mConfiguredNames((maxNumTracks >= 32 ? 
0 : 1 << maxNumTracks) - 1), + mSampleRate(sampleRate) +{ + ALOG_ASSERT(maxNumTracks <= MAX_NUM_TRACKS, "maxNumTracks %u > MAX_NUM_TRACKS %u", + maxNumTracks, MAX_NUM_TRACKS); + + // AudioMixer is not yet capable of more than 32 active track inputs + ALOG_ASSERT(32 >= MAX_NUM_TRACKS, "bad MAX_NUM_TRACKS %d", MAX_NUM_TRACKS); + + pthread_once(&sOnceControl, &sInitRoutine); + + mState.enabledTracks= 0; + mState.needsChanged = 0; + mState.frameCount = frameCount; + mState.hook = process__nop; + mState.outputTemp = NULL; + mState.resampleTemp = NULL; + mState.mLog = &mDummyLog; + // mState.reserved + + // FIXME Most of the following initialization is probably redundant since + // tracks[i] should only be referenced if (mTrackNames & (1 << i)) != 0 + // and mTrackNames is initially 0. However, leave it here until that's verified. + track_t* t = mState.tracks; + for (unsigned i=0 ; i < MAX_NUM_TRACKS ; i++) { + t->resampler = NULL; + t->downmixerBufferProvider = NULL; + t->mReformatBufferProvider = NULL; + t++; + } + +} + +AudioMixer::~AudioMixer() +{ + track_t* t = mState.tracks; + for (unsigned i=0 ; i < MAX_NUM_TRACKS ; i++) { + delete t->resampler; + delete t->downmixerBufferProvider; + delete t->mReformatBufferProvider; + t++; + } + delete [] mState.outputTemp; + delete [] mState.resampleTemp; +} + +void AudioMixer::setLog(NBLog::Writer *log) +{ + mState.mLog = log; +} + +int AudioMixer::getTrackName(audio_channel_mask_t channelMask, + audio_format_t format, int sessionId) +{ + if (!isValidPcmTrackFormat(format)) { + ALOGE("AudioMixer::getTrackName invalid format (%#x)", format); + return -1; + } + uint32_t names = (~mTrackNames) & mConfiguredNames; + if (names != 0) { + int n = __builtin_ctz(names); + ALOGV("add track (%d)", n); + // assume default parameters for the track, except where noted below + track_t* t = &mState.tracks[n]; + t->needs = 0; + + // Integer volume. + // Currently integer volume is kept for the legacy integer mixer. + // Will be removed when the legacy mixer path is removed. + t->volume[0] = UNITY_GAIN_INT; + t->volume[1] = UNITY_GAIN_INT; + t->prevVolume[0] = UNITY_GAIN_INT << 16; + t->prevVolume[1] = UNITY_GAIN_INT << 16; + t->volumeInc[0] = 0; + t->volumeInc[1] = 0; + t->auxLevel = 0; + t->auxInc = 0; + t->prevAuxLevel = 0; + + // Floating point volume. 
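        // (Worked example of the two gain representations kept here: the legacy
        //  integer path stores volumes in U4.12, so unity is 0x1000 and a gain of
        //  0.5 is 0x0800, while the ramp state prevVolume/prevAuxLevel is held
        //  shifted up by 16 bits in U4.28 -- 0x10000000 and 0x08000000 for the same
        //  two gains -- which is what the "<< 16" above produces. The naming follows
        //  the u4_12/u4_28 helpers used in adjustVolumeRamp() further down; the
        //  float path below simply uses 1.0f for unity.)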
+ t->mVolume[0] = UNITY_GAIN_FLOAT; + t->mVolume[1] = UNITY_GAIN_FLOAT; + t->mPrevVolume[0] = UNITY_GAIN_FLOAT; + t->mPrevVolume[1] = UNITY_GAIN_FLOAT; + t->mVolumeInc[0] = 0.; + t->mVolumeInc[1] = 0.; + t->mAuxLevel = 0.; + t->mAuxInc = 0.; + t->mPrevAuxLevel = 0.; + + // no initialization needed + // t->frameCount + t->channelCount = audio_channel_count_from_out_mask(channelMask); + t->enabled = false; + ALOGV_IF(audio_channel_mask_get_bits(channelMask) != AUDIO_CHANNEL_OUT_STEREO, + "Non-stereo channel mask: %d\n", channelMask); + t->channelMask = channelMask; + t->sessionId = sessionId; + // setBufferProvider(name, AudioBufferProvider *) is required before enable(name) + t->bufferProvider = NULL; + t->buffer.raw = NULL; + // no initialization needed + // t->buffer.frameCount + t->hook = NULL; + t->in = NULL; + t->resampler = NULL; + t->sampleRate = mSampleRate; + // setParameter(name, TRACK, MAIN_BUFFER, mixBuffer) is required before enable(name) + t->mainBuffer = NULL; + t->auxBuffer = NULL; + t->mInputBufferProvider = NULL; + t->mReformatBufferProvider = NULL; + t->downmixerBufferProvider = NULL; + t->mMixerFormat = AUDIO_FORMAT_PCM_16_BIT; + t->mFormat = format; + t->mMixerInFormat = kUseFloat && kUseNewMixer + ? AUDIO_FORMAT_PCM_FLOAT : AUDIO_FORMAT_PCM_16_BIT; + t->mMixerChannelMask = audio_channel_mask_from_representation_and_bits( + AUDIO_CHANNEL_REPRESENTATION_POSITION, AUDIO_CHANNEL_OUT_STEREO); + t->mMixerChannelCount = audio_channel_count_from_out_mask(t->mMixerChannelMask); + // Check the downmixing (or upmixing) requirements. + status_t status = initTrackDownmix(t, n); + if (status != OK) { + ALOGE("AudioMixer::getTrackName invalid channelMask (%#x)", channelMask); + return -1; + } + // initTrackDownmix() may change the input format requirement. + // If you desire floating point input to the mixer, it may change + // to integer because the downmixer requires integer to process. + ALOGVV("mMixerFormat:%#x mMixerInFormat:%#x\n", t->mMixerFormat, t->mMixerInFormat); + prepareTrackForReformat(t, n); + mTrackNames |= 1 << n; + return TRACK0 + n; + } + ALOGE("AudioMixer::getTrackName out of available tracks"); + return -1; +} + +void AudioMixer::invalidateState(uint32_t mask) +{ + if (mask != 0) { + mState.needsChanged |= mask; + mState.hook = process__validate; + } + } + +// Called when channel masks have changed for a track name +// TODO: Fix Downmixbufferprofider not to (possibly) change mixer input format, +// which will simplify this logic. +bool AudioMixer::setChannelMasks(int name, + audio_channel_mask_t trackChannelMask, audio_channel_mask_t mixerChannelMask) { + track_t &track = mState.tracks[name]; + + if (trackChannelMask == track.channelMask + && mixerChannelMask == track.mMixerChannelMask) { + return false; // no need to change + } + // always recompute for both channel masks even if only one has changed. + const uint32_t trackChannelCount = audio_channel_count_from_out_mask(trackChannelMask); + const uint32_t mixerChannelCount = audio_channel_count_from_out_mask(mixerChannelMask); + const bool mixerChannelCountChanged = track.mMixerChannelCount != mixerChannelCount; + + ALOG_ASSERT((trackChannelCount <= MAX_NUM_CHANNELS_TO_DOWNMIX) + && trackChannelCount + && mixerChannelCount); + track.channelMask = trackChannelMask; + track.channelCount = trackChannelCount; + track.mMixerChannelMask = mixerChannelMask; + track.mMixerChannelCount = mixerChannelCount; + + // channel masks have changed, does this track need a downmixer? 
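    // (Concrete example of the decision delegated to initTrackDownmix() below: a
    //  track delivering AUDIO_CHANNEL_OUT_5POINT1 into an AUDIO_CHANNEL_OUT_STEREO
    //  mixer gets a DownmixerBufferProvider when the downmix effect is available
    //  (which forces PCM16 input), otherwise a RemixBufferProvider; a plain
    //  AUDIO_CHANNEL_OUT_MONO track feeding a stereo mixer gets neither, because
    //  mono -> stereo duplication is handled inside the mix hooks themselves -- the
    //  MONO_HACK mentioned at the top of this file.)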
+ // update to try using our desired format (if we aren't already using it) + const audio_format_t prevMixerInFormat = track.mMixerInFormat; + track.mMixerInFormat = kUseFloat && kUseNewMixer + ? AUDIO_FORMAT_PCM_FLOAT : AUDIO_FORMAT_PCM_16_BIT; + const status_t status = initTrackDownmix(&mState.tracks[name], name); + ALOGE_IF(status != OK, + "initTrackDownmix error %d, track channel mask %#x, mixer channel mask %#x", + status, track.channelMask, track.mMixerChannelMask); + + const bool mixerInFormatChanged = prevMixerInFormat != track.mMixerInFormat; + if (mixerInFormatChanged) { + prepareTrackForReformat(&track, name); // because of downmixer, track format may change! + } + + if (track.resampler && (mixerInFormatChanged || mixerChannelCountChanged)) { + // resampler input format or channels may have changed. + const uint32_t resetToSampleRate = track.sampleRate; + delete track.resampler; + track.resampler = NULL; + track.sampleRate = mSampleRate; // without resampler, track rate is device sample rate. + // recreate the resampler with updated format, channels, saved sampleRate. + track.setResampler(resetToSampleRate /*trackSampleRate*/, mSampleRate /*devSampleRate*/); + } + return true; +} + +status_t AudioMixer::initTrackDownmix(track_t* pTrack, int trackName) +{ + // Only remix (upmix or downmix) if the track and mixer/device channel masks + // are not the same and not handled internally, as mono -> stereo currently is. + if (pTrack->channelMask != pTrack->mMixerChannelMask + && !(pTrack->channelMask == AUDIO_CHANNEL_OUT_MONO + && pTrack->mMixerChannelMask == AUDIO_CHANNEL_OUT_STEREO)) { + return prepareTrackForDownmix(pTrack, trackName); + } + // no remix necessary + unprepareTrackForDownmix(pTrack, trackName); + return NO_ERROR; +} + +void AudioMixer::unprepareTrackForDownmix(track_t* pTrack, int trackName __unused) { + ALOGV("AudioMixer::unprepareTrackForDownmix(%d)", trackName); + + if (pTrack->downmixerBufferProvider != NULL) { + // this track had previously been configured with a downmixer, delete it + ALOGV(" deleting old downmixer"); + delete pTrack->downmixerBufferProvider; + pTrack->downmixerBufferProvider = NULL; + reconfigureBufferProviders(pTrack); + } else { + ALOGV(" nothing to do, no downmixer to delete"); + } +} + +status_t AudioMixer::prepareTrackForDownmix(track_t* pTrack, int trackName) +{ + ALOGV("AudioMixer::prepareTrackForDownmix(%d) with mask 0x%x", trackName, pTrack->channelMask); + + // discard the previous downmixer if there was one + unprepareTrackForDownmix(pTrack, trackName); + if (DownmixerBufferProvider::isMultichannelCapable()) { + DownmixerBufferProvider* pDbp = new DownmixerBufferProvider(pTrack->channelMask, + pTrack->mMixerChannelMask, + AUDIO_FORMAT_PCM_16_BIT /* TODO: use pTrack->mMixerInFormat, now only PCM 16 */, + pTrack->sampleRate, pTrack->sessionId, kCopyBufferFrameCount); + + if (pDbp->isValid()) { // if constructor completed properly + pTrack->mMixerInFormat = AUDIO_FORMAT_PCM_16_BIT; // PCM 16 bit required for downmix + pTrack->downmixerBufferProvider = pDbp; + reconfigureBufferProviders(pTrack); + return NO_ERROR; + } + delete pDbp; + } + + // Effect downmixer does not accept the channel conversion. Let's use our remixer. + RemixBufferProvider* pRbp = new RemixBufferProvider(pTrack->channelMask, + pTrack->mMixerChannelMask, pTrack->mMixerInFormat, kCopyBufferFrameCount); + // Remix always finds a conversion whereas Downmixer effect above may fail. 
+ pTrack->downmixerBufferProvider = pRbp; + reconfigureBufferProviders(pTrack); + return NO_ERROR; +} + +void AudioMixer::unprepareTrackForReformat(track_t* pTrack, int trackName __unused) { + ALOGV("AudioMixer::unprepareTrackForReformat(%d)", trackName); + if (pTrack->mReformatBufferProvider != NULL) { + delete pTrack->mReformatBufferProvider; + pTrack->mReformatBufferProvider = NULL; + reconfigureBufferProviders(pTrack); + } +} + +status_t AudioMixer::prepareTrackForReformat(track_t* pTrack, int trackName) +{ + ALOGV("AudioMixer::prepareTrackForReformat(%d) with format %#x", trackName, pTrack->mFormat); + // discard the previous reformatter if there was one + unprepareTrackForReformat(pTrack, trackName); + // only configure reformatter if needed + if (pTrack->mFormat != pTrack->mMixerInFormat) { + pTrack->mReformatBufferProvider = new ReformatBufferProvider( + audio_channel_count_from_out_mask(pTrack->channelMask), + pTrack->mFormat, pTrack->mMixerInFormat, + kCopyBufferFrameCount); + reconfigureBufferProviders(pTrack); + } + return NO_ERROR; +} + +void AudioMixer::reconfigureBufferProviders(track_t* pTrack) +{ + pTrack->bufferProvider = pTrack->mInputBufferProvider; + if (pTrack->mReformatBufferProvider) { + pTrack->mReformatBufferProvider->setBufferProvider(pTrack->bufferProvider); + pTrack->bufferProvider = pTrack->mReformatBufferProvider; + } + if (pTrack->downmixerBufferProvider) { + pTrack->downmixerBufferProvider->setBufferProvider(pTrack->bufferProvider); + pTrack->bufferProvider = pTrack->downmixerBufferProvider; + } +} + +void AudioMixer::deleteTrackName(int name) +{ + ALOGV("AudioMixer::deleteTrackName(%d)", name); + name -= TRACK0; + ALOG_ASSERT(uint32_t(name) < MAX_NUM_TRACKS, "bad track name %d", name); + ALOGV("deleteTrackName(%d)", name); + track_t& track(mState.tracks[ name ]); + if (track.enabled) { + track.enabled = false; + invalidateState(1< AudioMixer::UNITY_GAIN_INT) { + intVolume = AudioMixer::UNITY_GAIN_INT; + } else if (intVolume < 0) { + ALOGE("negative volume %.7g", newVolume); + intVolume = 0; // should never happen, but for safety check. + } + if (intVolume == *pIntSetVolume) { + *pIntVolumeInc = 0; + /* TODO: integer/float workaround: ignore floating volume ramp */ + *pVolumeInc = 0; + *pPrevVolume = newVolume; + return true; + } + if (ramp != 0) { + *pIntVolumeInc = ((intVolume - *pIntSetVolume) << 16) / ramp; + *pIntPrevVolume = (*pIntVolumeInc == 0 ? 
intVolume : *pIntSetVolume) << 16; + } else { + *pIntVolumeInc = 0; + *pIntPrevVolume = intVolume << 16; + } + *pIntSetVolume = intVolume; + return true; +} + +void AudioMixer::setParameter(int name, int target, int param, void *value) +{ + name -= TRACK0; + ALOG_ASSERT(uint32_t(name) < MAX_NUM_TRACKS, "bad track name %d", name); + track_t& track = mState.tracks[name]; + + int valueInt = static_cast(reinterpret_cast(value)); + int32_t *valueBuf = reinterpret_cast(value); + + switch (target) { + + case TRACK: + switch (param) { + case CHANNEL_MASK: { + const audio_channel_mask_t trackChannelMask = + static_cast(valueInt); + if (setChannelMasks(name, trackChannelMask, track.mMixerChannelMask)) { + ALOGV("setParameter(TRACK, CHANNEL_MASK, %x)", trackChannelMask); + invalidateState(1 << name); + } + } break; + case MAIN_BUFFER: + if (track.mainBuffer != valueBuf) { + track.mainBuffer = valueBuf; + ALOGV("setParameter(TRACK, MAIN_BUFFER, %p)", valueBuf); + invalidateState(1 << name); + } + break; + case AUX_BUFFER: + if (track.auxBuffer != valueBuf) { + track.auxBuffer = valueBuf; + ALOGV("setParameter(TRACK, AUX_BUFFER, %p)", valueBuf); + invalidateState(1 << name); + } + break; + case FORMAT: { + audio_format_t format = static_cast(valueInt); + if (track.mFormat != format) { + ALOG_ASSERT(audio_is_linear_pcm(format), "Invalid format %#x", format); + track.mFormat = format; + ALOGV("setParameter(TRACK, FORMAT, %#x)", format); + prepareTrackForReformat(&track, name); + invalidateState(1 << name); + } + } break; + // FIXME do we want to support setting the downmix type from AudioFlinger? + // for a specific track? or per mixer? + /* case DOWNMIX_TYPE: + break */ + case MIXER_FORMAT: { + audio_format_t format = static_cast(valueInt); + if (track.mMixerFormat != format) { + track.mMixerFormat = format; + ALOGV("setParameter(TRACK, MIXER_FORMAT, %#x)", format); + } + } break; + case MIXER_CHANNEL_MASK: { + const audio_channel_mask_t mixerChannelMask = + static_cast(valueInt); + if (setChannelMasks(name, track.channelMask, mixerChannelMask)) { + ALOGV("setParameter(TRACK, MIXER_CHANNEL_MASK, %#x)", mixerChannelMask); + invalidateState(1 << name); + } + } break; + default: + LOG_ALWAYS_FATAL("setParameter track: bad param %d", param); + } + break; + + case RESAMPLE: + switch (param) { + case SAMPLE_RATE: + ALOG_ASSERT(valueInt > 0, "bad sample rate %d", valueInt); + if (track.setResampler(uint32_t(valueInt), mSampleRate)) { + ALOGV("setParameter(RESAMPLE, SAMPLE_RATE, %u)", + uint32_t(valueInt)); + invalidateState(1 << name); + } + break; + case RESET: + track.resetResampler(); + invalidateState(1 << name); + break; + case REMOVE: + delete track.resampler; + track.resampler = NULL; + track.sampleRate = mSampleRate; + invalidateState(1 << name); + break; + default: + LOG_ALWAYS_FATAL("setParameter resample: bad param %d", param); + } + break; + + case RAMP_VOLUME: + case VOLUME: + switch (param) { + case AUXLEVEL: + if (setVolumeRampVariables(*reinterpret_cast(value), + target == RAMP_VOLUME ? mState.frameCount : 0, + &track.auxLevel, &track.prevAuxLevel, &track.auxInc, + &track.mAuxLevel, &track.mPrevAuxLevel, &track.mAuxInc)) { + ALOGV("setParameter(%s, AUXLEVEL: %04x)", + target == VOLUME ? "VOLUME" : "RAMP_VOLUME", track.auxLevel); + invalidateState(1 << name); + } + break; + default: + if ((unsigned)param >= VOLUME0 && (unsigned)param < VOLUME0 + MAX_NUM_VOLUMES) { + if (setVolumeRampVariables(*reinterpret_cast(value), + target == RAMP_VOLUME ? 
mState.frameCount : 0, + &track.volume[param - VOLUME0], &track.prevVolume[param - VOLUME0], + &track.volumeInc[param - VOLUME0], + &track.mVolume[param - VOLUME0], &track.mPrevVolume[param - VOLUME0], + &track.mVolumeInc[param - VOLUME0])) { + ALOGV("setParameter(%s, VOLUME%d: %04x)", + target == VOLUME ? "VOLUME" : "RAMP_VOLUME", param - VOLUME0, + track.volume[param - VOLUME0]); + invalidateState(1 << name); + } + } else { + LOG_ALWAYS_FATAL("setParameter volume: bad param %d", param); + } + } + break; + + default: + LOG_ALWAYS_FATAL("setParameter: bad target %d", target); + } +} + +bool AudioMixer::track_t::setResampler(uint32_t trackSampleRate, uint32_t devSampleRate) +{ + if (trackSampleRate != devSampleRate || resampler != NULL) { + if (sampleRate != trackSampleRate) { + sampleRate = trackSampleRate; + if (resampler == NULL) { + ALOGV("Creating resampler from track %d Hz to device %d Hz", + trackSampleRate, devSampleRate); + AudioResampler::src_quality quality; + // force lowest quality level resampler if use case isn't music or video + // FIXME this is flawed for dynamic sample rates, as we choose the resampler + // quality level based on the initial ratio, but that could change later. + // Should have a way to distinguish tracks with static ratios vs. dynamic ratios. + if (!((trackSampleRate == 44100 && devSampleRate == 48000) || + (trackSampleRate == 48000 && devSampleRate == 44100))) { + quality = AudioResampler::DYN_LOW_QUALITY; + } else { + quality = AudioResampler::DEFAULT_QUALITY; + } + + // TODO: Remove MONO_HACK. Resampler sees #channels after the downmixer + // but if none exists, it is the channel count (1 for mono). + const int resamplerChannelCount = downmixerBufferProvider != NULL + ? mMixerChannelCount : channelCount; + ALOGVV("Creating resampler:" + " format(%#x) channels(%d) devSampleRate(%u) quality(%d)\n", + mMixerInFormat, resamplerChannelCount, devSampleRate, quality); + resampler = AudioResampler::create( + mMixerInFormat, + resamplerChannelCount, + devSampleRate, quality); + resampler->setLocalTimeFreq(sLocalTimeFreq); + } + return true; + } + } + return false; +} + +/* Checks to see if the volume ramp has completed and clears the increment + * variables appropriately. + * + * FIXME: There is code to handle int/float ramp variable switchover should it not + * complete within a mixer buffer processing call, but it is preferred to avoid switchover + * due to precision issues. The switchover code is included for legacy code purposes + * and can be removed once the integer volume is removed. + * + * It is not sufficient to clear only the volumeInc integer variable because + * if one channel requires ramping, all channels are ramped. + * + * There is a bit of duplicated code here, but it keeps backward compatibility. 
+ */ +inline void AudioMixer::track_t::adjustVolumeRamp(bool aux, bool useFloat) +{ + if (useFloat) { + for (uint32_t i = 0; i < MAX_NUM_VOLUMES; i++) { + if (mVolumeInc[i] != 0 && fabs(mVolume[i] - mPrevVolume[i]) <= fabs(mVolumeInc[i])) { + volumeInc[i] = 0; + prevVolume[i] = volume[i] << 16; + mVolumeInc[i] = 0.; + mPrevVolume[i] = mVolume[i]; + } else { + //ALOGV("ramp: %f %f %f", mVolume[i], mPrevVolume[i], mVolumeInc[i]); + prevVolume[i] = u4_28_from_float(mPrevVolume[i]); + } + } + } else { + for (uint32_t i = 0; i < MAX_NUM_VOLUMES; i++) { + if (((volumeInc[i]>0) && (((prevVolume[i]+volumeInc[i])>>16) >= volume[i])) || + ((volumeInc[i]<0) && (((prevVolume[i]+volumeInc[i])>>16) <= volume[i]))) { + volumeInc[i] = 0; + prevVolume[i] = volume[i] << 16; + mVolumeInc[i] = 0.; + mPrevVolume[i] = mVolume[i]; + } else { + //ALOGV("ramp: %d %d %d", volume[i] << 16, prevVolume[i], volumeInc[i]); + mPrevVolume[i] = float_from_u4_28(prevVolume[i]); + } + } + } + /* TODO: aux is always integer regardless of output buffer type */ + if (aux) { + if (((auxInc>0) && (((prevAuxLevel+auxInc)>>16) >= auxLevel)) || + ((auxInc<0) && (((prevAuxLevel+auxInc)>>16) <= auxLevel))) { + auxInc = 0; + prevAuxLevel = auxLevel << 16; + mAuxInc = 0.; + mPrevAuxLevel = mAuxLevel; + } else { + //ALOGV("aux ramp: %d %d %d", auxLevel << 16, prevAuxLevel, auxInc); + } + } +} + +size_t AudioMixer::getUnreleasedFrames(int name) const +{ + name -= TRACK0; + if (uint32_t(name) < MAX_NUM_TRACKS) { + return mState.tracks[name].getUnreleasedFrames(); + } + return 0; +} + +void AudioMixer::setBufferProvider(int name, AudioBufferProvider* bufferProvider) +{ + name -= TRACK0; + ALOG_ASSERT(uint32_t(name) < MAX_NUM_TRACKS, "bad track name %d", name); + + if (mState.tracks[name].mInputBufferProvider == bufferProvider) { + return; // don't reset any buffer providers if identical. + } + if (mState.tracks[name].mReformatBufferProvider != NULL) { + mState.tracks[name].mReformatBufferProvider->reset(); + } else if (mState.tracks[name].downmixerBufferProvider != NULL) { + } + + mState.tracks[name].mInputBufferProvider = bufferProvider; + reconfigureBufferProviders(&mState.tracks[name]); +} + + +void AudioMixer::process(int64_t pts) +{ + mState.hook(&mState, pts); +} + + +void AudioMixer::process__validate(state_t* state, int64_t pts) +{ + ALOGW_IF(!state->needsChanged, + "in process__validate() but nothing's invalid"); + + uint32_t changed = state->needsChanged; + state->needsChanged = 0; // clear the validation flag + + // recompute which tracks are enabled / disabled + uint32_t enabled = 0; + uint32_t disabled = 0; + while (changed) { + const int i = 31 - __builtin_clz(changed); + const uint32_t mask = 1<tracks[i]; + (t.enabled ? enabled : disabled) |= mask; + } + state->enabledTracks &= ~disabled; + state->enabledTracks |= enabled; + + // compute everything we need... + int countActiveTracks = 0; + // TODO: fix all16BitsStereNoResample logic to + // either properly handle muted tracks (it should ignore them) + // or remove altogether as an obsolete optimization. 
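    // (Worked example of the per-track "needs" word computed in the loop below: a
    //  stereo track that must be resampled and feeds an aux send ends up with
    //      n = (NEEDS_CHANNEL_1 + 2 - 1) | NEEDS_RESAMPLE | NEEDS_AUX
    //  and is routed to the TRACKTYPE_RESAMPLE hook, which also forces the generic
    //  resampling process hook; a track with volumeRL == 0, no ramp and no resampler
    //  gets NEEDS_MUTE and the no-op hook track__nop instead.)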
+ bool all16BitsStereoNoResample = true; + bool resampling = false; + bool volumeRamp = false; + uint32_t en = state->enabledTracks; + while (en) { + const int i = 31 - __builtin_clz(en); + en &= ~(1<tracks[i]; + uint32_t n = 0; + // FIXME can overflow (mask is only 3 bits) + n |= NEEDS_CHANNEL_1 + t.channelCount - 1; + if (t.doesResample()) { + n |= NEEDS_RESAMPLE; + } + if (t.auxLevel != 0 && t.auxBuffer != NULL) { + n |= NEEDS_AUX; + } + + if (t.volumeInc[0]|t.volumeInc[1]) { + volumeRamp = true; + } else if (!t.doesResample() && t.volumeRL == 0) { + n |= NEEDS_MUTE; + } + t.needs = n; + + if (n & NEEDS_MUTE) { + t.hook = track__nop; + } else { + if (n & NEEDS_AUX) { + all16BitsStereoNoResample = false; + } + if (n & NEEDS_RESAMPLE) { + all16BitsStereoNoResample = false; + resampling = true; + t.hook = getTrackHook(TRACKTYPE_RESAMPLE, t.mMixerChannelCount, + t.mMixerInFormat, t.mMixerFormat); + ALOGV_IF((n & NEEDS_CHANNEL_COUNT__MASK) > NEEDS_CHANNEL_2, + "Track %d needs downmix + resample", i); + } else { + if ((n & NEEDS_CHANNEL_COUNT__MASK) == NEEDS_CHANNEL_1){ + t.hook = getTrackHook( + t.mMixerChannelCount == 2 // TODO: MONO_HACK. + ? TRACKTYPE_NORESAMPLEMONO : TRACKTYPE_NORESAMPLE, + t.mMixerChannelCount, + t.mMixerInFormat, t.mMixerFormat); + all16BitsStereoNoResample = false; + } + if ((n & NEEDS_CHANNEL_COUNT__MASK) >= NEEDS_CHANNEL_2){ + t.hook = getTrackHook(TRACKTYPE_NORESAMPLE, t.mMixerChannelCount, + t.mMixerInFormat, t.mMixerFormat); + ALOGV_IF((n & NEEDS_CHANNEL_COUNT__MASK) > NEEDS_CHANNEL_2, + "Track %d needs downmix", i); + } + } + } + } + + // select the processing hooks + state->hook = process__nop; + if (countActiveTracks > 0) { + if (resampling) { + if (!state->outputTemp) { + state->outputTemp = new int32_t[MAX_NUM_CHANNELS * state->frameCount]; + } + if (!state->resampleTemp) { + state->resampleTemp = new int32_t[MAX_NUM_CHANNELS * state->frameCount]; + } + state->hook = process__genericResampling; + } else { + if (state->outputTemp) { + delete [] state->outputTemp; + state->outputTemp = NULL; + } + if (state->resampleTemp) { + delete [] state->resampleTemp; + state->resampleTemp = NULL; + } + state->hook = process__genericNoResampling; + if (all16BitsStereoNoResample && !volumeRamp) { + if (countActiveTracks == 1) { + const int i = 31 - __builtin_clz(state->enabledTracks); + track_t& t = state->tracks[i]; + if ((t.needs & NEEDS_MUTE) == 0) { + // The check prevents a muted track from acquiring a process hook. + // + // This is dangerous if the track is MONO as that requires + // special case handling due to implicit channel duplication. + // Stereo or Multichannel should actually be fine here. 
+ state->hook = getProcessHook(PROCESSTYPE_NORESAMPLEONETRACK, + t.mMixerChannelCount, t.mMixerInFormat, t.mMixerFormat); + } + } + } + } + } + + ALOGV("mixer configuration change: %d activeTracks (%08x) " + "all16BitsStereoNoResample=%d, resampling=%d, volumeRamp=%d", + countActiveTracks, state->enabledTracks, + all16BitsStereoNoResample, resampling, volumeRamp); + + state->hook(state, pts); + + // Now that the volume ramp has been done, set optimal state and + // track hooks for subsequent mixer process + if (countActiveTracks > 0) { + bool allMuted = true; + uint32_t en = state->enabledTracks; + while (en) { + const int i = 31 - __builtin_clz(en); + en &= ~(1<tracks[i]; + if (!t.doesResample() && t.volumeRL == 0) { + t.needs |= NEEDS_MUTE; + t.hook = track__nop; + } else { + allMuted = false; + } + } + if (allMuted) { + state->hook = process__nop; + } else if (all16BitsStereoNoResample) { + if (countActiveTracks == 1) { + const int i = 31 - __builtin_clz(state->enabledTracks); + track_t& t = state->tracks[i]; + // Muted single tracks handled by allMuted above. + state->hook = getProcessHook(PROCESSTYPE_NORESAMPLEONETRACK, + t.mMixerChannelCount, t.mMixerInFormat, t.mMixerFormat); + } + } + } +} + + +void AudioMixer::track__genericResample(track_t* t, int32_t* out, size_t outFrameCount, + int32_t* temp, int32_t* aux) +{ + ALOGVV("track__genericResample\n"); + t->resampler->setSampleRate(t->sampleRate); + + // ramp gain - resample to temp buffer and scale/mix in 2nd step + if (aux != NULL) { + // always resample with unity gain when sending to auxiliary buffer to be able + // to apply send level after resampling + t->resampler->setVolume(UNITY_GAIN_FLOAT, UNITY_GAIN_FLOAT); + memset(temp, 0, outFrameCount * t->mMixerChannelCount * sizeof(int32_t)); + t->resampler->resample(temp, outFrameCount, t->bufferProvider); + if (CC_UNLIKELY(t->volumeInc[0]|t->volumeInc[1]|t->auxInc)) { + volumeRampStereo(t, out, outFrameCount, temp, aux); + } else { + volumeStereo(t, out, outFrameCount, temp, aux); + } + } else { + if (CC_UNLIKELY(t->volumeInc[0]|t->volumeInc[1])) { + t->resampler->setVolume(UNITY_GAIN_FLOAT, UNITY_GAIN_FLOAT); + memset(temp, 0, outFrameCount * MAX_NUM_CHANNELS * sizeof(int32_t)); + t->resampler->resample(temp, outFrameCount, t->bufferProvider); + volumeRampStereo(t, out, outFrameCount, temp, aux); + } + + // constant gain + else { + t->resampler->setVolume(t->mVolume[0], t->mVolume[1]); + t->resampler->resample(out, outFrameCount, t->bufferProvider); + } + } +} + +void AudioMixer::track__nop(track_t* t __unused, int32_t* out __unused, + size_t outFrameCount __unused, int32_t* temp __unused, int32_t* aux __unused) +{ +} + +void AudioMixer::volumeRampStereo(track_t* t, int32_t* out, size_t frameCount, int32_t* temp, + int32_t* aux) +{ + int32_t vl = t->prevVolume[0]; + int32_t vr = t->prevVolume[1]; + const int32_t vlInc = t->volumeInc[0]; + const int32_t vrInc = t->volumeInc[1]; + + //ALOGD("[0] %p: inc=%f, v0=%f, v1=%d, final=%f, count=%d", + // t, vlInc/65536.0f, vl/65536.0f, t->volume[0], + // (vl + vlInc*frameCount)/65536.0f, frameCount); + + // ramp volume + if (CC_UNLIKELY(aux != NULL)) { + int32_t va = t->prevAuxLevel; + const int32_t vaInc = t->auxInc; + int32_t l; + int32_t r; + + do { + l = (*temp++ >> 12); + r = (*temp++ >> 12); + *out++ += (vl >> 16) * l; + *out++ += (vr >> 16) * r; + *aux++ += (va >> 17) * (l + r); + vl += vlInc; + vr += vrInc; + va += vaInc; + } while (--frameCount); + t->prevAuxLevel = va; + } else { + do { + *out++ += (vl >> 16) * (*temp++ >> 12); + 
*out++ += (vr >> 16) * (*temp++ >> 12); + vl += vlInc; + vr += vrInc; + } while (--frameCount); + } + t->prevVolume[0] = vl; + t->prevVolume[1] = vr; + t->adjustVolumeRamp(aux != NULL); +} + +void AudioMixer::volumeStereo(track_t* t, int32_t* out, size_t frameCount, int32_t* temp, + int32_t* aux) +{ + const int16_t vl = t->volume[0]; + const int16_t vr = t->volume[1]; + + if (CC_UNLIKELY(aux != NULL)) { + const int16_t va = t->auxLevel; + do { + int16_t l = (int16_t)(*temp++ >> 12); + int16_t r = (int16_t)(*temp++ >> 12); + out[0] = mulAdd(l, vl, out[0]); + int16_t a = (int16_t)(((int32_t)l + r) >> 1); + out[1] = mulAdd(r, vr, out[1]); + out += 2; + aux[0] = mulAdd(a, va, aux[0]); + aux++; + } while (--frameCount); + } else { + do { + int16_t l = (int16_t)(*temp++ >> 12); + int16_t r = (int16_t)(*temp++ >> 12); + out[0] = mulAdd(l, vl, out[0]); + out[1] = mulAdd(r, vr, out[1]); + out += 2; + } while (--frameCount); + } +} + +void AudioMixer::track__16BitsStereo(track_t* t, int32_t* out, size_t frameCount, + int32_t* temp __unused, int32_t* aux) +{ + ALOGVV("track__16BitsStereo\n"); + const int16_t *in = static_cast(t->in); + + if (CC_UNLIKELY(aux != NULL)) { + int32_t l; + int32_t r; + // ramp gain + if (CC_UNLIKELY(t->volumeInc[0]|t->volumeInc[1]|t->auxInc)) { + int32_t vl = t->prevVolume[0]; + int32_t vr = t->prevVolume[1]; + int32_t va = t->prevAuxLevel; + const int32_t vlInc = t->volumeInc[0]; + const int32_t vrInc = t->volumeInc[1]; + const int32_t vaInc = t->auxInc; + // ALOGD("[1] %p: inc=%f, v0=%f, v1=%d, final=%f, count=%d", + // t, vlInc/65536.0f, vl/65536.0f, t->volume[0], + // (vl + vlInc*frameCount)/65536.0f, frameCount); + + do { + l = (int32_t)*in++; + r = (int32_t)*in++; + *out++ += (vl >> 16) * l; + *out++ += (vr >> 16) * r; + *aux++ += (va >> 17) * (l + r); + vl += vlInc; + vr += vrInc; + va += vaInc; + } while (--frameCount); + + t->prevVolume[0] = vl; + t->prevVolume[1] = vr; + t->prevAuxLevel = va; + t->adjustVolumeRamp(true); + } + + // constant gain + else { + const uint32_t vrl = t->volumeRL; + const int16_t va = (int16_t)t->auxLevel; + do { + uint32_t rl = *reinterpret_cast(in); + int16_t a = (int16_t)(((int32_t)in[0] + in[1]) >> 1); + in += 2; + out[0] = mulAddRL(1, rl, vrl, out[0]); + out[1] = mulAddRL(0, rl, vrl, out[1]); + out += 2; + aux[0] = mulAdd(a, va, aux[0]); + aux++; + } while (--frameCount); + } + } else { + // ramp gain + if (CC_UNLIKELY(t->volumeInc[0]|t->volumeInc[1])) { + int32_t vl = t->prevVolume[0]; + int32_t vr = t->prevVolume[1]; + const int32_t vlInc = t->volumeInc[0]; + const int32_t vrInc = t->volumeInc[1]; + + // ALOGD("[1] %p: inc=%f, v0=%f, v1=%d, final=%f, count=%d", + // t, vlInc/65536.0f, vl/65536.0f, t->volume[0], + // (vl + vlInc*frameCount)/65536.0f, frameCount); + + do { + *out++ += (vl >> 16) * (int32_t) *in++; + *out++ += (vr >> 16) * (int32_t) *in++; + vl += vlInc; + vr += vrInc; + } while (--frameCount); + + t->prevVolume[0] = vl; + t->prevVolume[1] = vr; + t->adjustVolumeRamp(false); + } + + // constant gain + else { + const uint32_t vrl = t->volumeRL; + do { + uint32_t rl = *reinterpret_cast(in); + in += 2; + out[0] = mulAddRL(1, rl, vrl, out[0]); + out[1] = mulAddRL(0, rl, vrl, out[1]); + out += 2; + } while (--frameCount); + } + } + t->in = in; +} + +void AudioMixer::track__16BitsMono(track_t* t, int32_t* out, size_t frameCount, + int32_t* temp __unused, int32_t* aux) +{ + ALOGVV("track__16BitsMono\n"); + const int16_t *in = static_cast(t->in); + + if (CC_UNLIKELY(aux != NULL)) { + // ramp gain + if 
(CC_UNLIKELY(t->volumeInc[0]|t->volumeInc[1]|t->auxInc)) { + int32_t vl = t->prevVolume[0]; + int32_t vr = t->prevVolume[1]; + int32_t va = t->prevAuxLevel; + const int32_t vlInc = t->volumeInc[0]; + const int32_t vrInc = t->volumeInc[1]; + const int32_t vaInc = t->auxInc; + + // ALOGD("[2] %p: inc=%f, v0=%f, v1=%d, final=%f, count=%d", + // t, vlInc/65536.0f, vl/65536.0f, t->volume[0], + // (vl + vlInc*frameCount)/65536.0f, frameCount); + + do { + int32_t l = *in++; + *out++ += (vl >> 16) * l; + *out++ += (vr >> 16) * l; + *aux++ += (va >> 16) * l; + vl += vlInc; + vr += vrInc; + va += vaInc; + } while (--frameCount); + + t->prevVolume[0] = vl; + t->prevVolume[1] = vr; + t->prevAuxLevel = va; + t->adjustVolumeRamp(true); + } + // constant gain + else { + const int16_t vl = t->volume[0]; + const int16_t vr = t->volume[1]; + const int16_t va = (int16_t)t->auxLevel; + do { + int16_t l = *in++; + out[0] = mulAdd(l, vl, out[0]); + out[1] = mulAdd(l, vr, out[1]); + out += 2; + aux[0] = mulAdd(l, va, aux[0]); + aux++; + } while (--frameCount); + } + } else { + // ramp gain + if (CC_UNLIKELY(t->volumeInc[0]|t->volumeInc[1])) { + int32_t vl = t->prevVolume[0]; + int32_t vr = t->prevVolume[1]; + const int32_t vlInc = t->volumeInc[0]; + const int32_t vrInc = t->volumeInc[1]; + + // ALOGD("[2] %p: inc=%f, v0=%f, v1=%d, final=%f, count=%d", + // t, vlInc/65536.0f, vl/65536.0f, t->volume[0], + // (vl + vlInc*frameCount)/65536.0f, frameCount); + + do { + int32_t l = *in++; + *out++ += (vl >> 16) * l; + *out++ += (vr >> 16) * l; + vl += vlInc; + vr += vrInc; + } while (--frameCount); + + t->prevVolume[0] = vl; + t->prevVolume[1] = vr; + t->adjustVolumeRamp(false); + } + // constant gain + else { + const int16_t vl = t->volume[0]; + const int16_t vr = t->volume[1]; + do { + int16_t l = *in++; + out[0] = mulAdd(l, vl, out[0]); + out[1] = mulAdd(l, vr, out[1]); + out += 2; + } while (--frameCount); + } + } + t->in = in; +} + +// no-op case +void AudioMixer::process__nop(state_t* state, int64_t pts) +{ + ALOGVV("process__nop\n"); + uint32_t e0 = state->enabledTracks; + while (e0) { + // process by group of tracks with same output buffer to + // avoid multiple memset() on same buffer + uint32_t e1 = e0, e2 = e0; + int i = 31 - __builtin_clz(e1); + { + track_t& t1 = state->tracks[i]; + e2 &= ~(1<tracks[i]; + if (CC_UNLIKELY(t2.mainBuffer != t1.mainBuffer)) { + e1 &= ~(1<frameCount * t1.mMixerChannelCount + * audio_bytes_per_sample(t1.mMixerFormat)); + } + + while (e1) { + i = 31 - __builtin_clz(e1); + e1 &= ~(1<tracks[i]; + size_t outFrames = state->frameCount; + while (outFrames) { + t3.buffer.frameCount = outFrames; + int64_t outputPTS = calculateOutputPTS( + t3, pts, state->frameCount - outFrames); + t3.bufferProvider->getNextBuffer(&t3.buffer, outputPTS); + if (t3.buffer.raw == NULL) break; + outFrames -= t3.buffer.frameCount; + t3.bufferProvider->releaseBuffer(&t3.buffer); + } + } + } + } +} + +// generic code without resampling +void AudioMixer::process__genericNoResampling(state_t* state, int64_t pts) +{ + ALOGVV("process__genericNoResampling\n"); + int32_t outTemp[BLOCKSIZE * MAX_NUM_CHANNELS] __attribute__((aligned(32))); + + // acquire each track's buffer + uint32_t enabledTracks = state->enabledTracks; + uint32_t e0 = enabledTracks; + while (e0) { + const int i = 31 - __builtin_clz(e0); + e0 &= ~(1<tracks[i]; + t.buffer.frameCount = state->frameCount; + t.bufferProvider->getNextBuffer(&t.buffer, pts); + t.frameCount = t.buffer.frameCount; + t.in = t.buffer.raw; + } + + e0 = enabledTracks; + while 
(e0) { + // process by group of tracks with same output buffer to + // optimize cache use + uint32_t e1 = e0, e2 = e0; + int j = 31 - __builtin_clz(e1); + track_t& t1 = state->tracks[j]; + e2 &= ~(1<tracks[j]; + if (CC_UNLIKELY(t2.mainBuffer != t1.mainBuffer)) { + e1 &= ~(1<tracks[i]; + size_t outFrames = BLOCKSIZE; + int32_t *aux = NULL; + if (CC_UNLIKELY(t.needs & NEEDS_AUX)) { + aux = t.auxBuffer + numFrames; + } + while (outFrames) { + // t.in == NULL can happen if the track was flushed just after having + // been enabled for mixing. + if (t.in == NULL) { + enabledTracks &= ~(1< outFrames)?outFrames:t.frameCount; + if (inFrames > 0) { + t.hook(&t, outTemp + (BLOCKSIZE - outFrames) * t.mMixerChannelCount, + inFrames, state->resampleTemp, aux); + t.frameCount -= inFrames; + outFrames -= inFrames; + if (CC_UNLIKELY(aux != NULL)) { + aux += inFrames; + } + } + if (t.frameCount == 0 && outFrames) { + t.bufferProvider->releaseBuffer(&t.buffer); + t.buffer.frameCount = (state->frameCount - numFrames) - + (BLOCKSIZE - outFrames); + int64_t outputPTS = calculateOutputPTS( + t, pts, numFrames + (BLOCKSIZE - outFrames)); + t.bufferProvider->getNextBuffer(&t.buffer, outputPTS); + t.in = t.buffer.raw; + if (t.in == NULL) { + enabledTracks &= ~(1<((uint8_t*)out + + BLOCKSIZE * t1.mMixerChannelCount + * audio_bytes_per_sample(t1.mMixerFormat)); + numFrames += BLOCKSIZE; + } while (numFrames < state->frameCount); + } + + // release each track's buffer + e0 = enabledTracks; + while (e0) { + const int i = 31 - __builtin_clz(e0); + e0 &= ~(1<tracks[i]; + t.bufferProvider->releaseBuffer(&t.buffer); + } +} + + +// generic code with resampling +void AudioMixer::process__genericResampling(state_t* state, int64_t pts) +{ + ALOGVV("process__genericResampling\n"); + // this const just means that local variable outTemp doesn't change + int32_t* const outTemp = state->outputTemp; + size_t numFrames = state->frameCount; + + uint32_t e0 = state->enabledTracks; + while (e0) { + // process by group of tracks with same output buffer + // to optimize cache use + uint32_t e1 = e0, e2 = e0; + int j = 31 - __builtin_clz(e1); + track_t& t1 = state->tracks[j]; + e2 &= ~(1<tracks[j]; + if (CC_UNLIKELY(t2.mainBuffer != t1.mainBuffer)) { + e1 &= ~(1<frameCount); + while (e1) { + const int i = 31 - __builtin_clz(e1); + e1 &= ~(1<tracks[i]; + int32_t *aux = NULL; + if (CC_UNLIKELY(t.needs & NEEDS_AUX)) { + aux = t.auxBuffer; + } + + // this is a little goofy, on the resampling case we don't + // acquire/release the buffers because it's done by + // the resampler. + if (t.needs & NEEDS_RESAMPLE) { + t.resampler->setPTS(pts); + t.hook(&t, outTemp, numFrames, state->resampleTemp, aux); + } else { + + size_t outFrames = 0; + + while (outFrames < numFrames) { + t.buffer.frameCount = numFrames - outFrames; + int64_t outputPTS = calculateOutputPTS(t, pts, outFrames); + t.bufferProvider->getNextBuffer(&t.buffer, outputPTS); + t.in = t.buffer.raw; + // t.in == NULL can happen if the track was flushed just after having + // been enabled for mixing. 
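// [Illustrative aside, not from the imported AOSP file] Every process__* function
// above walks the enabled-track set as a 32-bit mask, taking the highest set bit
// first via count-leading-zeros. The idiom in isolation:
//
//     uint32_t e = state->enabledTracks;
//     while (e) {
//         const int i = 31 - __builtin_clz(e);   // index of the highest set bit
//         e &= ~(1u << i);                        // clear it before the next pass
//         // ... operate on state->tracks[i] ...
//     }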
+ if (t.in == NULL) break; + + if (CC_UNLIKELY(aux != NULL)) { + aux += outFrames; + } + t.hook(&t, outTemp + outFrames * t.mMixerChannelCount, t.buffer.frameCount, + state->resampleTemp, aux); + outFrames += t.buffer.frameCount; + t.bufferProvider->releaseBuffer(&t.buffer); + } + } + } + convertMixerFormat(out, t1.mMixerFormat, + outTemp, t1.mMixerInFormat, numFrames * t1.mMixerChannelCount); + } +} + +// one track, 16 bits stereo without resampling is the most common case +void AudioMixer::process__OneTrack16BitsStereoNoResampling(state_t* state, + int64_t pts) +{ + ALOGVV("process__OneTrack16BitsStereoNoResampling\n"); + // This method is only called when state->enabledTracks has exactly + // one bit set. The asserts below would verify this, but are commented out + // since the whole point of this method is to optimize performance. + //ALOG_ASSERT(0 != state->enabledTracks, "no tracks enabled"); + const int i = 31 - __builtin_clz(state->enabledTracks); + //ALOG_ASSERT((1 << i) == state->enabledTracks, "more than 1 track enabled"); + const track_t& t = state->tracks[i]; + + AudioBufferProvider::Buffer& b(t.buffer); + + int32_t* out = t.mainBuffer; + float *fout = reinterpret_cast(out); + size_t numFrames = state->frameCount; + + const int16_t vl = t.volume[0]; + const int16_t vr = t.volume[1]; + const uint32_t vrl = t.volumeRL; + while (numFrames) { + b.frameCount = numFrames; + int64_t outputPTS = calculateOutputPTS(t, pts, out - t.mainBuffer); + t.bufferProvider->getNextBuffer(&b, outputPTS); + const int16_t *in = b.i16; + + // in == NULL can happen if the track was flushed just after having + // been enabled for mixing. + if (in == NULL || (((uintptr_t)in) & 3)) { + memset(out, 0, numFrames + * t.mMixerChannelCount * audio_bytes_per_sample(t.mMixerFormat)); + ALOGE_IF((((uintptr_t)in) & 3), + "process__OneTrack16BitsStereoNoResampling: misaligned buffer" + " %p track %d, channels %d, needs %08x, volume %08x vfl %f vfr %f", + in, i, t.channelCount, t.needs, vrl, t.mVolume[0], t.mVolume[1]); + return; + } + size_t outFrames = b.frameCount; + + switch (t.mMixerFormat) { + case AUDIO_FORMAT_PCM_FLOAT: + do { + uint32_t rl = *reinterpret_cast(in); + in += 2; + int32_t l = mulRL(1, rl, vrl); + int32_t r = mulRL(0, rl, vrl); + *fout++ = float_from_q4_27(l); + *fout++ = float_from_q4_27(r); + // Note: In case of later int16_t sink output, + // conversion and clamping is done by memcpy_to_i16_from_float(). + } while (--outFrames); + break; + case AUDIO_FORMAT_PCM_16_BIT: + if (CC_UNLIKELY(uint32_t(vl) > UNITY_GAIN_INT || uint32_t(vr) > UNITY_GAIN_INT)) { + // volume is boosted, so we might need to clamp even though + // we process only one track. + do { + uint32_t rl = *reinterpret_cast(in); + in += 2; + int32_t l = mulRL(1, rl, vrl) >> 12; + int32_t r = mulRL(0, rl, vrl) >> 12; + // clamping... 
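// [Illustrative aside, not from the imported AOSP file] clamp16() saturates an
// intermediate sum to the int16_t range; a minimal stand-alone equivalent:
//
//     static inline int16_t clamp16_sketch(int32_t sample) {
//         if (sample >  32767) return  32767;
//         if (sample < -32768) return -32768;
//         return static_cast<int16_t>(sample);
//     }
//
// It is only needed on this branch because the U4.12 volume exceeds unity
// (UNITY_GAIN_INT), so mulRL(...) >> 12 can overshoot full scale.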
+ l = clamp16(l); + r = clamp16(r); + *out++ = (r<<16) | (l & 0xFFFF); + } while (--outFrames); + } else { + do { + uint32_t rl = *reinterpret_cast(in); + in += 2; + int32_t l = mulRL(1, rl, vrl) >> 12; + int32_t r = mulRL(0, rl, vrl) >> 12; + *out++ = (r<<16) | (l & 0xFFFF); + } while (--outFrames); + } + break; + default: + LOG_ALWAYS_FATAL("bad mixer format: %d", t.mMixerFormat); + } + numFrames -= b.frameCount; + t.bufferProvider->releaseBuffer(&b); + } +} + +int64_t AudioMixer::calculateOutputPTS(const track_t& t, int64_t basePTS, + int outputFrameIndex) +{ + if (AudioBufferProvider::kInvalidPTS == basePTS) { + return AudioBufferProvider::kInvalidPTS; + } + + return basePTS + ((outputFrameIndex * sLocalTimeFreq) / t.sampleRate); +} + +/*static*/ uint64_t AudioMixer::sLocalTimeFreq; +/*static*/ pthread_once_t AudioMixer::sOnceControl = PTHREAD_ONCE_INIT; + +/*static*/ void AudioMixer::sInitRoutine() +{ + LocalClock lc; + sLocalTimeFreq = lc.getLocalFreq(); // for the resampler + + DownmixerBufferProvider::init(); // for the downmixer +} + +/* TODO: consider whether this level of optimization is necessary. + * Perhaps just stick with a single for loop. + */ + +// Needs to derive a compile time constant (constexpr). Could be targeted to go +// to a MONOVOL mixtype based on MAX_NUM_VOLUMES, but that's an unnecessary complication. +#define MIXTYPE_MONOVOL(mixtype) (mixtype == MIXTYPE_MULTI ? MIXTYPE_MULTI_MONOVOL : \ + mixtype == MIXTYPE_MULTI_SAVEONLY ? MIXTYPE_MULTI_SAVEONLY_MONOVOL : mixtype) + +/* MIXTYPE (see AudioMixerOps.h MIXTYPE_* enumeration) + * TO: int32_t (Q4.27) or float + * TI: int32_t (Q4.27) or int16_t (Q0.15) or float + * TA: int32_t (Q4.27) + */ +template +static void volumeRampMulti(uint32_t channels, TO* out, size_t frameCount, + const TI* in, TA* aux, TV *vol, const TV *volinc, TAV *vola, TAV volainc) +{ + switch (channels) { + case 1: + volumeRampMulti(out, frameCount, in, aux, vol, volinc, vola, volainc); + break; + case 2: + volumeRampMulti(out, frameCount, in, aux, vol, volinc, vola, volainc); + break; + case 3: + volumeRampMulti(out, + frameCount, in, aux, vol, volinc, vola, volainc); + break; + case 4: + volumeRampMulti(out, + frameCount, in, aux, vol, volinc, vola, volainc); + break; + case 5: + volumeRampMulti(out, + frameCount, in, aux, vol, volinc, vola, volainc); + break; + case 6: + volumeRampMulti(out, + frameCount, in, aux, vol, volinc, vola, volainc); + break; + case 7: + volumeRampMulti(out, + frameCount, in, aux, vol, volinc, vola, volainc); + break; + case 8: + volumeRampMulti(out, + frameCount, in, aux, vol, volinc, vola, volainc); + break; + } +} + +/* MIXTYPE (see AudioMixerOps.h MIXTYPE_* enumeration) + * TO: int32_t (Q4.27) or float + * TI: int32_t (Q4.27) or int16_t (Q0.15) or float + * TA: int32_t (Q4.27) + */ +template +static void volumeMulti(uint32_t channels, TO* out, size_t frameCount, + const TI* in, TA* aux, const TV *vol, TAV vola) +{ + switch (channels) { + case 1: + volumeMulti(out, frameCount, in, aux, vol, vola); + break; + case 2: + volumeMulti(out, frameCount, in, aux, vol, vola); + break; + case 3: + volumeMulti(out, frameCount, in, aux, vol, vola); + break; + case 4: + volumeMulti(out, frameCount, in, aux, vol, vola); + break; + case 5: + volumeMulti(out, frameCount, in, aux, vol, vola); + break; + case 6: + volumeMulti(out, frameCount, in, aux, vol, vola); + break; + case 7: + volumeMulti(out, frameCount, in, aux, vol, vola); + break; + case 8: + volumeMulti(out, frameCount, in, aux, vol, vola); + break; + } +} + +/* MIXTYPE 
(see AudioMixerOps.h MIXTYPE_* enumeration) + * USEFLOATVOL (set to true if float volume is used) + * ADJUSTVOL (set to true if volume ramp parameters needs adjustment afterwards) + * TO: int32_t (Q4.27) or float + * TI: int32_t (Q4.27) or int16_t (Q0.15) or float + * TA: int32_t (Q4.27) + */ +template +void AudioMixer::volumeMix(TO *out, size_t outFrames, + const TI *in, TA *aux, bool ramp, AudioMixer::track_t *t) +{ + if (USEFLOATVOL) { + if (ramp) { + volumeRampMulti(t->mMixerChannelCount, out, outFrames, in, aux, + t->mPrevVolume, t->mVolumeInc, &t->prevAuxLevel, t->auxInc); + if (ADJUSTVOL) { + t->adjustVolumeRamp(aux != NULL, true); + } + } else { + volumeMulti(t->mMixerChannelCount, out, outFrames, in, aux, + t->mVolume, t->auxLevel); + } + } else { + if (ramp) { + volumeRampMulti(t->mMixerChannelCount, out, outFrames, in, aux, + t->prevVolume, t->volumeInc, &t->prevAuxLevel, t->auxInc); + if (ADJUSTVOL) { + t->adjustVolumeRamp(aux != NULL); + } + } else { + volumeMulti(t->mMixerChannelCount, out, outFrames, in, aux, + t->volume, t->auxLevel); + } + } +} + +/* This process hook is called when there is a single track without + * aux buffer, volume ramp, or resampling. + * TODO: Update the hook selection: this can properly handle aux and ramp. + * + * MIXTYPE (see AudioMixerOps.h MIXTYPE_* enumeration) + * TO: int32_t (Q4.27) or float + * TI: int32_t (Q4.27) or int16_t (Q0.15) or float + * TA: int32_t (Q4.27) + */ +template +void AudioMixer::process_NoResampleOneTrack(state_t* state, int64_t pts) +{ + ALOGVV("process_NoResampleOneTrack\n"); + // CLZ is faster than CTZ on ARM, though really not sure if true after 31 - clz. + const int i = 31 - __builtin_clz(state->enabledTracks); + ALOG_ASSERT((1 << i) == state->enabledTracks, "more than 1 track enabled"); + track_t *t = &state->tracks[i]; + const uint32_t channels = t->mMixerChannelCount; + TO* out = reinterpret_cast(t->mainBuffer); + TA* aux = reinterpret_cast(t->auxBuffer); + const bool ramp = t->needsRamp(); + + for (size_t numFrames = state->frameCount; numFrames; ) { + AudioBufferProvider::Buffer& b(t->buffer); + // get input buffer + b.frameCount = numFrames; + const int64_t outputPTS = calculateOutputPTS(*t, pts, state->frameCount - numFrames); + t->bufferProvider->getNextBuffer(&b, outputPTS); + const TI *in = reinterpret_cast(b.raw); + + // in == NULL can happen if the track was flushed just after having + // been enabled for mixing. + if (in == NULL || (((uintptr_t)in) & 3)) { + memset(out, 0, numFrames + * channels * audio_bytes_per_sample(t->mMixerFormat)); + ALOGE_IF((((uintptr_t)in) & 3), "process_NoResampleOneTrack: bus error: " + "buffer %p track %p, channels %d, needs %#x", + in, t, t->channelCount, t->needs); + return; + } + + const size_t outFrames = b.frameCount; + volumeMix::value, false> ( + out, outFrames, in, aux, ramp, t); + + out += outFrames * channels; + if (aux != NULL) { + aux += channels; + } + numFrames -= b.frameCount; + + // release buffer + t->bufferProvider->releaseBuffer(&b); + } + if (ramp) { + t->adjustVolumeRamp(aux != NULL, is_same::value); + } +} + +/* This track hook is called to do resampling then mixing, + * pulling from the track's upstream AudioBufferProvider. 
+ * + * MIXTYPE (see AudioMixerOps.h MIXTYPE_* enumeration) + * TO: int32_t (Q4.27) or float + * TI: int32_t (Q4.27) or int16_t (Q0.15) or float + * TA: int32_t (Q4.27) + */ +template +void AudioMixer::track__Resample(track_t* t, TO* out, size_t outFrameCount, TO* temp, TA* aux) +{ + ALOGVV("track__Resample\n"); + t->resampler->setSampleRate(t->sampleRate); + const bool ramp = t->needsRamp(); + if (ramp || aux != NULL) { + // if ramp: resample with unity gain to temp buffer and scale/mix in 2nd step. + // if aux != NULL: resample with unity gain to temp buffer then apply send level. + + t->resampler->setVolume(UNITY_GAIN_FLOAT, UNITY_GAIN_FLOAT); + memset(temp, 0, outFrameCount * t->mMixerChannelCount * sizeof(TO)); + t->resampler->resample((int32_t*)temp, outFrameCount, t->bufferProvider); + + volumeMix::value, true>( + out, outFrameCount, temp, aux, ramp, t); + + } else { // constant volume gain + t->resampler->setVolume(t->mVolume[0], t->mVolume[1]); + t->resampler->resample((int32_t*)out, outFrameCount, t->bufferProvider); + } +} + +/* This track hook is called to mix a track, when no resampling is required. + * The input buffer should be present in t->in. + * + * MIXTYPE (see AudioMixerOps.h MIXTYPE_* enumeration) + * TO: int32_t (Q4.27) or float + * TI: int32_t (Q4.27) or int16_t (Q0.15) or float + * TA: int32_t (Q4.27) + */ +template +void AudioMixer::track__NoResample(track_t* t, TO* out, size_t frameCount, + TO* temp __unused, TA* aux) +{ + ALOGVV("track__NoResample\n"); + const TI *in = static_cast(t->in); + + volumeMix::value, true>( + out, frameCount, in, aux, t->needsRamp(), t); + + // MIXTYPE_MONOEXPAND reads a single input channel and expands to NCHAN output channels. + // MIXTYPE_MULTI reads NCHAN input channels and places to NCHAN output channels. + in += (MIXTYPE == MIXTYPE_MONOEXPAND) ? frameCount : frameCount * t->mMixerChannelCount; + t->in = in; +} + +/* The Mixer engine generates either int32_t (Q4_27) or float data. + * We use this function to convert the engine buffers + * to the desired mixer output format, either int16_t (Q.15) or float. + */ +void AudioMixer::convertMixerFormat(void *out, audio_format_t mixerOutFormat, + void *in, audio_format_t mixerInFormat, size_t sampleCount) +{ + switch (mixerInFormat) { + case AUDIO_FORMAT_PCM_FLOAT: + switch (mixerOutFormat) { + case AUDIO_FORMAT_PCM_FLOAT: + memcpy(out, in, sampleCount * sizeof(float)); // MEMCPY. TODO optimize out + break; + case AUDIO_FORMAT_PCM_16_BIT: + memcpy_to_i16_from_float((int16_t*)out, (float*)in, sampleCount); + break; + default: + LOG_ALWAYS_FATAL("bad mixerOutFormat: %#x", mixerOutFormat); + break; + } + break; + case AUDIO_FORMAT_PCM_16_BIT: + switch (mixerOutFormat) { + case AUDIO_FORMAT_PCM_FLOAT: + memcpy_to_float_from_q4_27((float*)out, (int32_t*)in, sampleCount); + break; + case AUDIO_FORMAT_PCM_16_BIT: + // two int16_t are produced per iteration + ditherAndClamp((int32_t*)out, (int32_t*)in, sampleCount >> 1); + break; + default: + LOG_ALWAYS_FATAL("bad mixerOutFormat: %#x", mixerOutFormat); + break; + } + break; + default: + LOG_ALWAYS_FATAL("bad mixerInFormat: %#x", mixerInFormat); + break; + } +} + +/* Returns the proper track hook to use for mixing the track into the output buffer. 
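// [Illustrative aside, not from the imported AOSP file] The engine accumulates in
// Q4.27 (unity = 1 << 27 = 134217728), leaving four integer bits of headroom before
// convertMixerFormat() above narrows the result. For one accumulated sample q:
//
//     float   f = q / 134217728.0f;   // roughly what memcpy_to_float_from_q4_27() does per sample
//     int16_t s = clamp16(q >> 12);   // Q4.27 -> Q0.15 with saturation; ditherAndClamp() does this pairwise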
+ */ +AudioMixer::hook_t AudioMixer::getTrackHook(int trackType, uint32_t channelCount, + audio_format_t mixerInFormat, audio_format_t mixerOutFormat __unused) +{ + if (!kUseNewMixer && channelCount == FCC_2 && mixerInFormat == AUDIO_FORMAT_PCM_16_BIT) { + switch (trackType) { + case TRACKTYPE_NOP: + return track__nop; + case TRACKTYPE_RESAMPLE: + return track__genericResample; + case TRACKTYPE_NORESAMPLEMONO: + return track__16BitsMono; + case TRACKTYPE_NORESAMPLE: + return track__16BitsStereo; + default: + LOG_ALWAYS_FATAL("bad trackType: %d", trackType); + break; + } + } + LOG_ALWAYS_FATAL_IF(channelCount > MAX_NUM_CHANNELS); + switch (trackType) { + case TRACKTYPE_NOP: + return track__nop; + case TRACKTYPE_RESAMPLE: + switch (mixerInFormat) { + case AUDIO_FORMAT_PCM_FLOAT: + return (AudioMixer::hook_t) + track__Resample; + case AUDIO_FORMAT_PCM_16_BIT: + return (AudioMixer::hook_t)\ + track__Resample; + default: + LOG_ALWAYS_FATAL("bad mixerInFormat: %#x", mixerInFormat); + break; + } + break; + case TRACKTYPE_NORESAMPLEMONO: + switch (mixerInFormat) { + case AUDIO_FORMAT_PCM_FLOAT: + return (AudioMixer::hook_t) + track__NoResample; + case AUDIO_FORMAT_PCM_16_BIT: + return (AudioMixer::hook_t) + track__NoResample; + default: + LOG_ALWAYS_FATAL("bad mixerInFormat: %#x", mixerInFormat); + break; + } + break; + case TRACKTYPE_NORESAMPLE: + switch (mixerInFormat) { + case AUDIO_FORMAT_PCM_FLOAT: + return (AudioMixer::hook_t) + track__NoResample; + case AUDIO_FORMAT_PCM_16_BIT: + return (AudioMixer::hook_t) + track__NoResample; + default: + LOG_ALWAYS_FATAL("bad mixerInFormat: %#x", mixerInFormat); + break; + } + break; + default: + LOG_ALWAYS_FATAL("bad trackType: %d", trackType); + break; + } + return NULL; +} + +/* Returns the proper process hook for mixing tracks. Currently works only for + * PROCESSTYPE_NORESAMPLEONETRACK, a mix involving one track, no resampling. + * + * TODO: Due to the special mixing considerations of duplicating to + * a stereo output track, the input track cannot be MONO. This should be + * prevented by the caller. 
+ */ +AudioMixer::process_hook_t AudioMixer::getProcessHook(int processType, uint32_t channelCount, + audio_format_t mixerInFormat, audio_format_t mixerOutFormat) +{ + if (processType != PROCESSTYPE_NORESAMPLEONETRACK) { // Only NORESAMPLEONETRACK + LOG_ALWAYS_FATAL("bad processType: %d", processType); + return NULL; + } + if (!kUseNewMixer && channelCount == FCC_2 && mixerInFormat == AUDIO_FORMAT_PCM_16_BIT) { + return process__OneTrack16BitsStereoNoResampling; + } + LOG_ALWAYS_FATAL_IF(channelCount > MAX_NUM_CHANNELS); + switch (mixerInFormat) { + case AUDIO_FORMAT_PCM_FLOAT: + switch (mixerOutFormat) { + case AUDIO_FORMAT_PCM_FLOAT: + return process_NoResampleOneTrack; + case AUDIO_FORMAT_PCM_16_BIT: + return process_NoResampleOneTrack; + default: + LOG_ALWAYS_FATAL("bad mixerOutFormat: %#x", mixerOutFormat); + break; + } + break; + case AUDIO_FORMAT_PCM_16_BIT: + switch (mixerOutFormat) { + case AUDIO_FORMAT_PCM_FLOAT: + return process_NoResampleOneTrack; + case AUDIO_FORMAT_PCM_16_BIT: + return process_NoResampleOneTrack; + default: + LOG_ALWAYS_FATAL("bad mixerOutFormat: %#x", mixerOutFormat); + break; + } + break; + default: + LOG_ALWAYS_FATAL("bad mixerInFormat: %#x", mixerInFormat); + break; + } + return NULL; +} + +// ---------------------------------------------------------------------------- +}; // namespace android diff --git a/multimedia/02_day/test-mixer/AudioMixer.h b/multimedia/02_day/test-mixer/AudioMixer.h new file mode 100644 index 0000000..3b972bb --- /dev/null +++ b/multimedia/02_day/test-mixer/AudioMixer.h @@ -0,0 +1,470 @@ +/* +** +** Copyright 2007, The Android Open Source Project +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +*/ + +#ifndef ANDROID_AUDIO_MIXER_H +#define ANDROID_AUDIO_MIXER_H + +#include +#include + +#include + +#include +#include "AudioResampler.h" + +#include +#include +#include + +// FIXME This is actually unity gain, which might not be max in future, expressed in U.12 +#define MAX_GAIN_INT AudioMixer::UNITY_GAIN_INT + +namespace android { + +// ---------------------------------------------------------------------------- + +class AudioMixer +{ +public: + AudioMixer(size_t frameCount, uint32_t sampleRate, + uint32_t maxNumTracks = MAX_NUM_TRACKS); + + /*virtual*/ ~AudioMixer(); // non-virtual saves a v-table, restore if sub-classed + + + // This mixer has a hard-coded upper limit of 32 active track inputs. + // Adding support for > 32 tracks would require more than simply changing this value. + static const uint32_t MAX_NUM_TRACKS = 32; + // maximum number of channels supported by the mixer + + // This mixer has a hard-coded upper limit of 8 channels for output. 
+ static const uint32_t MAX_NUM_CHANNELS = 8; + static const uint32_t MAX_NUM_VOLUMES = 2; // stereo volume only + // maximum number of channels supported for the content + static const uint32_t MAX_NUM_CHANNELS_TO_DOWNMIX = AUDIO_CHANNEL_COUNT_MAX; + + static const uint16_t UNITY_GAIN_INT = 0x1000; + static const float UNITY_GAIN_FLOAT = 1.0f; + + enum { // names + + // track names (MAX_NUM_TRACKS units) + TRACK0 = 0x1000, + + // 0x2000 is unused + + // setParameter targets + TRACK = 0x3000, + RESAMPLE = 0x3001, + RAMP_VOLUME = 0x3002, // ramp to new volume + VOLUME = 0x3003, // don't ramp + + // set Parameter names + // for target TRACK + CHANNEL_MASK = 0x4000, + FORMAT = 0x4001, + MAIN_BUFFER = 0x4002, + AUX_BUFFER = 0x4003, + DOWNMIX_TYPE = 0X4004, + MIXER_FORMAT = 0x4005, // AUDIO_FORMAT_PCM_(FLOAT|16_BIT) + MIXER_CHANNEL_MASK = 0x4006, // Channel mask for mixer output + // for target RESAMPLE + SAMPLE_RATE = 0x4100, // Configure sample rate conversion on this track name; + // parameter 'value' is the new sample rate in Hz. + // Only creates a sample rate converter the first time that + // the track sample rate is different from the mix sample rate. + // If the new sample rate is the same as the mix sample rate, + // and a sample rate converter already exists, + // then the sample rate converter remains present but is a no-op. + RESET = 0x4101, // Reset sample rate converter without changing sample rate. + // This clears out the resampler's input buffer. + REMOVE = 0x4102, // Remove the sample rate converter on this track name; + // the track is restored to the mix sample rate. + // for target RAMP_VOLUME and VOLUME (8 channels max) + // FIXME use float for these 3 to improve the dynamic range + VOLUME0 = 0x4200, + VOLUME1 = 0x4201, + AUXLEVEL = 0x4210, + }; + + + // For all APIs with "name": TRACK0 <= name < TRACK0 + MAX_NUM_TRACKS + + // Allocate a track name. Returns new track name if successful, -1 on failure. + // The failure could be because of an invalid channelMask or format, or that + // the track capacity of the mixer is exceeded. 
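// [Illustrative aside, not from the imported AOSP file] Conceptually, allocation
// hands out the lowest clear bit of the 32-bit name mask:
//
//     uint32_t free = ~mTrackNames & mConfiguredNames;  // names still available
//     if (free == 0) return -1;                         // track capacity exceeded
//     const int n = __builtin_ctz(free);                // lowest free slot
//     mTrackNames |= 1u << n;
//     return TRACK0 + n;    // (-1 is also returned for an unsupported mask/format)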
+ int getTrackName(audio_channel_mask_t channelMask, + audio_format_t format, int sessionId); + + // Free an allocated track by name + void deleteTrackName(int name); + + // Enable or disable an allocated track by name + void enable(int name); + void disable(int name); + + void setParameter(int name, int target, int param, void *value); + + void setBufferProvider(int name, AudioBufferProvider* bufferProvider); + void process(int64_t pts); + + uint32_t trackNames() const { return mTrackNames; } + + size_t getUnreleasedFrames(int name) const; + + static inline bool isValidPcmTrackFormat(audio_format_t format) { + return format == AUDIO_FORMAT_PCM_16_BIT || + format == AUDIO_FORMAT_PCM_24_BIT_PACKED || + format == AUDIO_FORMAT_PCM_32_BIT || + format == AUDIO_FORMAT_PCM_FLOAT; + } + +private: + + enum { + // FIXME this representation permits up to 8 channels + NEEDS_CHANNEL_COUNT__MASK = 0x00000007, + }; + + enum { + NEEDS_CHANNEL_1 = 0x00000000, // mono + NEEDS_CHANNEL_2 = 0x00000001, // stereo + + // sample format is not explicitly specified, and is assumed to be AUDIO_FORMAT_PCM_16_BIT + + NEEDS_MUTE = 0x00000100, + NEEDS_RESAMPLE = 0x00001000, + NEEDS_AUX = 0x00010000, + }; + + struct state_t; + struct track_t; + class CopyBufferProvider; + + typedef void (*hook_t)(track_t* t, int32_t* output, size_t numOutFrames, int32_t* temp, + int32_t* aux); + static const int BLOCKSIZE = 16; // 4 cache lines + + struct track_t { + uint32_t needs; + + // TODO: Eventually remove legacy integer volume settings + union { + int16_t volume[MAX_NUM_VOLUMES]; // U4.12 fixed point (top bit should be zero) + int32_t volumeRL; + }; + + int32_t prevVolume[MAX_NUM_VOLUMES]; + + // 16-byte boundary + + int32_t volumeInc[MAX_NUM_VOLUMES]; + int32_t auxInc; + int32_t prevAuxLevel; + + // 16-byte boundary + + int16_t auxLevel; // 0 <= auxLevel <= MAX_GAIN_INT, but signed for mul performance + uint16_t frameCount; + + uint8_t channelCount; // 1 or 2, redundant with (needs & NEEDS_CHANNEL_COUNT__MASK) + uint8_t unused_padding; // formerly format, was always 16 + uint16_t enabled; // actually bool + audio_channel_mask_t channelMask; + + // actual buffer provider used by the track hooks, see DownmixerBufferProvider below + // for how the Track buffer provider is wrapped by another one when dowmixing is required + AudioBufferProvider* bufferProvider; + + // 16-byte boundary + + mutable AudioBufferProvider::Buffer buffer; // 8 bytes + + hook_t hook; + const void* in; // current location in buffer + + // 16-byte boundary + + AudioResampler* resampler; + uint32_t sampleRate; + int32_t* mainBuffer; + int32_t* auxBuffer; + + // 16-byte boundary + AudioBufferProvider* mInputBufferProvider; // externally provided buffer provider. + CopyBufferProvider* mReformatBufferProvider; // provider wrapper for reformatting. + CopyBufferProvider* downmixerBufferProvider; // wrapper for channel conversion. + + int32_t sessionId; + + // 16-byte boundary + audio_format_t mMixerFormat; // output mix format: AUDIO_FORMAT_PCM_(FLOAT|16_BIT) + audio_format_t mFormat; // input track format + audio_format_t mMixerInFormat; // mix internal format AUDIO_FORMAT_PCM_(FLOAT|16_BIT) + // each track must be converted to this format. 
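// [Illustrative aside, not from the imported AOSP file] "Converted to this format"
// means, for example, widening Q0.15 int16_t samples to float before mixing; a
// minimal stand-alone conversion loop (names here are made up for illustration):
//
//     static void i16ToFloat(float *dst, const int16_t *src, size_t samples) {
//         for (size_t n = 0; n < samples; ++n) {
//             dst[n] = src[n] / 32768.0f;   // Q0.15 -> [-1.0f, 1.0f)
//         }
//     }
//
// This kind of per-track conversion is what ReformatBufferProvider (declared below) is for.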
+ + float mVolume[MAX_NUM_VOLUMES]; // floating point set volume + float mPrevVolume[MAX_NUM_VOLUMES]; // floating point previous volume + float mVolumeInc[MAX_NUM_VOLUMES]; // floating point volume increment + + float mAuxLevel; // floating point set aux level + float mPrevAuxLevel; // floating point prev aux level + float mAuxInc; // floating point aux increment + + // 16-byte boundary + audio_channel_mask_t mMixerChannelMask; + uint32_t mMixerChannelCount; + + bool needsRamp() { return (volumeInc[0] | volumeInc[1] | auxInc) != 0; } + bool setResampler(uint32_t trackSampleRate, uint32_t devSampleRate); + bool doesResample() const { return resampler != NULL; } + void resetResampler() { if (resampler != NULL) resampler->reset(); } + void adjustVolumeRamp(bool aux, bool useFloat = false); + size_t getUnreleasedFrames() const { return resampler != NULL ? + resampler->getUnreleasedFrames() : 0; }; + }; + + typedef void (*process_hook_t)(state_t* state, int64_t pts); + + // pad to 32-bytes to fill cache line + struct state_t { + uint32_t enabledTracks; + uint32_t needsChanged; + size_t frameCount; + process_hook_t hook; // one of process__*, never NULL + int32_t *outputTemp; + int32_t *resampleTemp; + NBLog::Writer* mLog; + int32_t reserved[1]; + // FIXME allocate dynamically to save some memory when maxNumTracks < MAX_NUM_TRACKS + track_t tracks[MAX_NUM_TRACKS] __attribute__((aligned(32))); + }; + + // Base AudioBufferProvider class used for DownMixerBufferProvider, RemixBufferProvider, + // and ReformatBufferProvider. + // It handles a private buffer for use in converting format or channel masks from the + // input data to a form acceptable by the mixer. + // TODO: Make a ResamplerBufferProvider when integers are entirely removed from the + // processing pipeline. + class CopyBufferProvider : public AudioBufferProvider { + public: + // Use a private buffer of bufferFrameCount frames (each frame is outputFrameSize bytes). + // If bufferFrameCount is 0, no private buffer is created and in-place modification of + // the upstream buffer provider's buffers is performed by copyFrames(). + CopyBufferProvider(size_t inputFrameSize, size_t outputFrameSize, + size_t bufferFrameCount); + virtual ~CopyBufferProvider(); + + // Overrides AudioBufferProvider methods + virtual status_t getNextBuffer(Buffer* buffer, int64_t pts); + virtual void releaseBuffer(Buffer* buffer); + + // Other public methods + + // call this to release the buffer to the upstream provider. + // treat it as an audio discontinuity for future samples. + virtual void reset(); + + // this function should be supplied by the derived class. It converts + // #frames in the *src pointer to the *dst pointer. It is public because + // some providers will allow this to work on arbitrary buffers outside + // of the internal buffers. + virtual void copyFrames(void *dst, const void *src, size_t frames) = 0; + + // set the upstream buffer provider. Consider calling "reset" before this function. + void setBufferProvider(AudioBufferProvider *p) { + mTrackBufferProvider = p; + } + + protected: + AudioBufferProvider* mTrackBufferProvider; + const size_t mInputFrameSize; + const size_t mOutputFrameSize; + private: + AudioBufferProvider::Buffer mBuffer; + const size_t mLocalBufferFrameCount; + void* mLocalBufferData; + size_t mConsumed; + }; + + // DownmixerBufferProvider wraps a track AudioBufferProvider to provide + // position dependent downmixing by an Audio Effect. 
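// [Illustrative aside, not from the imported AOSP file] The real downmix below is
// delegated to an audio Effect; the simplest possible channel fold, shown only to
// illustrate what a copyFrames() conversion step amounts to, would be:
//
//     static void stereoToMono(int16_t *dst, const int16_t *src, size_t frames) {
//         for (size_t i = 0; i < frames; ++i) {
//             dst[i] = static_cast<int16_t>(((int32_t)src[2 * i] + src[2 * i + 1]) >> 1);
//         }
//     }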
+ class DownmixerBufferProvider : public CopyBufferProvider { + public: + DownmixerBufferProvider(audio_channel_mask_t inputChannelMask, + audio_channel_mask_t outputChannelMask, audio_format_t format, + uint32_t sampleRate, int32_t sessionId, size_t bufferFrameCount); + virtual ~DownmixerBufferProvider(); + virtual void copyFrames(void *dst, const void *src, size_t frames); + bool isValid() const { return mDownmixHandle != NULL; } + + static status_t init(); + static bool isMultichannelCapable() { return sIsMultichannelCapable; } + + protected: + effect_handle_t mDownmixHandle; + effect_config_t mDownmixConfig; + + // effect descriptor for the downmixer used by the mixer + static effect_descriptor_t sDwnmFxDesc; + // indicates whether a downmix effect has been found and is usable by this mixer + static bool sIsMultichannelCapable; + // FIXME: should we allow effects outside of the framework? + // We need to here. A special ioId that must be <= -2 so it does not map to a session. + static const int32_t SESSION_ID_INVALID_AND_IGNORED = -2; + }; + + // RemixBufferProvider wraps a track AudioBufferProvider to perform an + // upmix or downmix to the proper channel count and mask. + class RemixBufferProvider : public CopyBufferProvider { + public: + RemixBufferProvider(audio_channel_mask_t inputChannelMask, + audio_channel_mask_t outputChannelMask, audio_format_t format, + size_t bufferFrameCount); + virtual void copyFrames(void *dst, const void *src, size_t frames); + + protected: + const audio_format_t mFormat; + const size_t mSampleSize; + const size_t mInputChannels; + const size_t mOutputChannels; + int8_t mIdxAry[sizeof(uint32_t)*8]; // 32 bits => channel indices + }; + + // ReformatBufferProvider wraps a track AudioBufferProvider to convert the input data + // to an acceptable mixer input format type. + class ReformatBufferProvider : public CopyBufferProvider { + public: + ReformatBufferProvider(int32_t channels, + audio_format_t inputFormat, audio_format_t outputFormat, + size_t bufferFrameCount); + virtual void copyFrames(void *dst, const void *src, size_t frames); + + protected: + const int32_t mChannels; + const audio_format_t mInputFormat; + const audio_format_t mOutputFormat; + }; + + // bitmask of allocated track names, where bit 0 corresponds to TRACK0 etc. + uint32_t mTrackNames; + + // bitmask of configured track names; ~0 if maxNumTracks == MAX_NUM_TRACKS, + // but will have fewer bits set if maxNumTracks < MAX_NUM_TRACKS + const uint32_t mConfiguredNames; + + const uint32_t mSampleRate; + + NBLog::Writer mDummyLog; +public: + void setLog(NBLog::Writer* log); +private: + state_t mState __attribute__((aligned(32))); + + // Call after changing either the enabled status of a track, or parameters of an enabled track. + // OK to call more often than that, but unnecessary. + void invalidateState(uint32_t mask); + + bool setChannelMasks(int name, + audio_channel_mask_t trackChannelMask, audio_channel_mask_t mixerChannelMask); + + // TODO: remove unused trackName/trackNum from functions below. 
+ static status_t initTrackDownmix(track_t* pTrack, int trackName); + static status_t prepareTrackForDownmix(track_t* pTrack, int trackNum); + static void unprepareTrackForDownmix(track_t* pTrack, int trackName); + static status_t prepareTrackForReformat(track_t* pTrack, int trackNum); + static void unprepareTrackForReformat(track_t* pTrack, int trackName); + static void reconfigureBufferProviders(track_t* pTrack); + + static void track__genericResample(track_t* t, int32_t* out, size_t numFrames, int32_t* temp, + int32_t* aux); + static void track__nop(track_t* t, int32_t* out, size_t numFrames, int32_t* temp, int32_t* aux); + static void track__16BitsStereo(track_t* t, int32_t* out, size_t numFrames, int32_t* temp, + int32_t* aux); + static void track__16BitsMono(track_t* t, int32_t* out, size_t numFrames, int32_t* temp, + int32_t* aux); + static void volumeRampStereo(track_t* t, int32_t* out, size_t frameCount, int32_t* temp, + int32_t* aux); + static void volumeStereo(track_t* t, int32_t* out, size_t frameCount, int32_t* temp, + int32_t* aux); + + static void process__validate(state_t* state, int64_t pts); + static void process__nop(state_t* state, int64_t pts); + static void process__genericNoResampling(state_t* state, int64_t pts); + static void process__genericResampling(state_t* state, int64_t pts); + static void process__OneTrack16BitsStereoNoResampling(state_t* state, + int64_t pts); + + static int64_t calculateOutputPTS(const track_t& t, int64_t basePTS, + int outputFrameIndex); + + static uint64_t sLocalTimeFreq; + static pthread_once_t sOnceControl; + static void sInitRoutine(); + + /* multi-format volume mixing function (calls template functions + * in AudioMixerOps.h). The template parameters are as follows: + * + * MIXTYPE (see AudioMixerOps.h MIXTYPE_* enumeration) + * USEFLOATVOL (set to true if float volume is used) + * ADJUSTVOL (set to true if volume ramp parameters needs adjustment afterwards) + * TO: int32_t (Q4.27) or float + * TI: int32_t (Q4.27) or int16_t (Q0.15) or float + * TA: int32_t (Q4.27) + */ + template + static void volumeMix(TO *out, size_t outFrames, + const TI *in, TA *aux, bool ramp, AudioMixer::track_t *t); + + // multi-format process hooks + template + static void process_NoResampleOneTrack(state_t* state, int64_t pts); + + // multi-format track hooks + template + static void track__Resample(track_t* t, TO* out, size_t frameCount, + TO* temp __unused, TA* aux); + template + static void track__NoResample(track_t* t, TO* out, size_t frameCount, + TO* temp __unused, TA* aux); + + static void convertMixerFormat(void *out, audio_format_t mixerOutFormat, + void *in, audio_format_t mixerInFormat, size_t sampleCount); + + // hook types + enum { + PROCESSTYPE_NORESAMPLEONETRACK, + }; + enum { + TRACKTYPE_NOP, + TRACKTYPE_RESAMPLE, + TRACKTYPE_NORESAMPLE, + TRACKTYPE_NORESAMPLEMONO, + }; + + // functions for determining the proper process and track hooks. 
+ static process_hook_t getProcessHook(int processType, uint32_t channelCount, + audio_format_t mixerInFormat, audio_format_t mixerOutFormat); + static hook_t getTrackHook(int trackType, uint32_t channelCount, + audio_format_t mixerInFormat, audio_format_t mixerOutFormat); +}; + +// ---------------------------------------------------------------------------- +}; // namespace android + +#endif // ANDROID_AUDIO_MIXER_H diff --git a/multimedia/02_day/test-mixer/c.wav b/multimedia/02_day/test-mixer/c.wav new file mode 100644 index 0000000..7db2370 Binary files /dev/null and b/multimedia/02_day/test-mixer/c.wav differ diff --git a/multimedia/02_day/test-mixer/test-mixer.cpp b/multimedia/02_day/test-mixer/test-mixer.cpp new file mode 100644 index 0000000..cfd4718 --- /dev/null +++ b/multimedia/02_day/test-mixer/test-mixer.cpp @@ -0,0 +1,306 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include +#include +#include +#include +#include "AudioMixer.h" +#include "test_utils.h" + +/* Testing is typically through creation of an output WAV file from several + * source inputs, to be later analyzed by an audio program such as Audacity. + * + * Sine or chirp functions are typically more useful as input to the mixer + * as they show up as straight lines on a spectrogram if successfully mixed. + * + * A sample shell script is provided: mixer_to_wave_tests.sh + */ + +using namespace android; + +static void usage(const char* name) { + fprintf(stderr, "Usage: %s [-f] [-m] [-c channels]" + " [-s sample-rate] [-o ] [-a ] [-P csv]" + " ( | )+\n", name); + fprintf(stderr, " -f enable floating point input track\n"); + fprintf(stderr, " -m enable floating point mixer output\n"); + fprintf(stderr, " -c number of mixer output channels\n"); + fprintf(stderr, " -s mixer sample-rate\n"); + fprintf(stderr, " -o WAV file, pcm16 (or float if -m specified)\n"); + fprintf(stderr, " -a \n"); + fprintf(stderr, " -P # frames provided per call to resample() in CSV format\n"); + fprintf(stderr, " is a WAV file\n"); + fprintf(stderr, " can be 'sine:,,'\n"); + fprintf(stderr, " 'chirp:,'\n"); +} + +static int writeFile(const char *filename, const void *buffer, + uint32_t sampleRate, uint32_t channels, size_t frames, bool isBufferFloat) { + if (filename == NULL) { + return 0; // ok to pass in NULL filename + } + // write output to file. + SF_INFO info; + info.frames = 0; + info.samplerate = sampleRate; + info.channels = channels; + info.format = SF_FORMAT_WAV | (isBufferFloat ? 
SF_FORMAT_FLOAT : SF_FORMAT_PCM_16); + printf("saving file:%s channels:%u samplerate:%u frames:%zu\n", + filename, info.channels, info.samplerate, frames); + SNDFILE *sf = sf_open(filename, SFM_WRITE, &info); + if (sf == NULL) { + perror(filename); + return EXIT_FAILURE; + } + if (isBufferFloat) { + (void) sf_writef_float(sf, (float*)buffer, frames); + } else { + (void) sf_writef_short(sf, (short*)buffer, frames); + } + sf_close(sf); + return EXIT_SUCCESS; +} + +int main(int argc, char* argv[]) { + const char* const progname = argv[0]; + bool useInputFloat = false; + bool useMixerFloat = false; + bool useRamp = true; + uint32_t outputSampleRate = 48000; + uint32_t outputChannels = 2; // stereo for now + std::vector Pvalues; + const char* outputFilename = NULL; + const char* auxFilename = NULL; + std::vector Names; + std::vector Providers; + + for (int ch; (ch = getopt(argc, argv, "fmc:s:o:a:P:")) != -1;) { + switch (ch) { + case 'f': + useInputFloat = true; + break; + case 'm': + useMixerFloat = true; + break; + case 'c': + outputChannels = atoi(optarg); + break; + case 's': + outputSampleRate = atoi(optarg); + break; + case 'o': + outputFilename = optarg; + break; + case 'a': + auxFilename = optarg; + break; + case 'P': + if (parseCSV(optarg, Pvalues) < 0) { + fprintf(stderr, "incorrect syntax for -P option\n"); + return EXIT_FAILURE; + } + break; + case '?': + default: + usage(progname); + return EXIT_FAILURE; + } + } + argc -= optind; + argv += optind; + + if (argc == 0) { + usage(progname); + return EXIT_FAILURE; + } + if ((unsigned)argc > AudioMixer::MAX_NUM_TRACKS) { + fprintf(stderr, "too many tracks: %d > %u", argc, AudioMixer::MAX_NUM_TRACKS); + return EXIT_FAILURE; + } + + size_t outputFrames = 0; + + // create providers for each track + Providers.resize(argc); + for (int i = 0; i < argc; ++i) { + static const char chirp[] = "chirp:"; + static const char sine[] = "sine:"; + static const double kSeconds = 10; + + if (!strncmp(argv[i], chirp, strlen(chirp))) { + std::vector v; + + parseCSV(argv[i] + strlen(chirp), v); + if (v.size() == 2) { + printf("creating chirp(%d %d)\n", v[0], v[1]); + if (useInputFloat) { + Providers[i].setChirp(v[0], 0, v[1]/2, v[1], kSeconds); + } else { + Providers[i].setChirp(v[0], 0, v[1]/2, v[1], kSeconds); + } + Providers[i].setIncr(Pvalues); + } else { + fprintf(stderr, "malformed input '%s'\n", argv[i]); + } + } else if (!strncmp(argv[i], sine, strlen(sine))) { + std::vector v; + + parseCSV(argv[i] + strlen(sine), v); + if (v.size() == 3) { + printf("creating sine(%d %d %d)\n", v[0], v[1], v[2]); + if (useInputFloat) { + Providers[i].setSine(v[0], v[1], v[2], kSeconds); + } else { + Providers[i].setSine(v[0], v[1], v[2], kSeconds); + } + Providers[i].setIncr(Pvalues); + } else { + fprintf(stderr, "malformed input '%s'\n", argv[i]); + } + } else { + printf("creating filename(%s)\n", argv[i]); + if (useInputFloat) { + Providers[i].setFile(argv[i]); + } else { + Providers[i].setFile(argv[i]); + } + Providers[i].setIncr(Pvalues); + } + // calculate the number of output frames + size_t nframes = (int64_t) Providers[i].getNumFrames() * outputSampleRate + / Providers[i].getSampleRate(); + if (i == 0 || outputFrames > nframes) { // choose minimum for outputFrames + outputFrames = nframes; + } + } + + // create the output buffer. + const size_t outputFrameSize = outputChannels + * (useMixerFloat ? 
sizeof(float) : sizeof(int16_t)); + const size_t outputSize = outputFrames * outputFrameSize; + const audio_channel_mask_t outputChannelMask = + audio_channel_out_mask_from_count(outputChannels); + void *outputAddr = NULL; + (void) posix_memalign(&outputAddr, 32, outputSize); + memset(outputAddr, 0, outputSize); + + // create the aux buffer, if needed. + const size_t auxFrameSize = sizeof(int32_t); // Q4.27 always + const size_t auxSize = outputFrames * auxFrameSize; + void *auxAddr = NULL; + if (auxFilename) { + (void) posix_memalign(&auxAddr, 32, auxSize); + memset(auxAddr, 0, auxSize); + } + + // create the mixer. + const size_t mixerFrameCount = 320; // typical numbers may range from 240 or 960 + AudioMixer *mixer = new AudioMixer(mixerFrameCount, outputSampleRate); + audio_format_t inputFormat = useInputFloat + ? AUDIO_FORMAT_PCM_FLOAT : AUDIO_FORMAT_PCM_16_BIT; + audio_format_t mixerFormat = useMixerFloat + ? AUDIO_FORMAT_PCM_FLOAT : AUDIO_FORMAT_PCM_16_BIT; + float f = AudioMixer::UNITY_GAIN_FLOAT / Providers.size(); // normalize volume by # tracks + static float f0; // zero + + // set up the tracks. + for (size_t i = 0; i < Providers.size(); ++i) { + //printf("track %d out of %d\n", i, Providers.size()); + uint32_t channelMask = audio_channel_out_mask_from_count(Providers[i].getNumChannels()); + int32_t name = mixer->getTrackName(channelMask, + inputFormat, AUDIO_SESSION_OUTPUT_MIX); + ALOG_ASSERT(name >= 0); + Names.push_back(name); + mixer->setBufferProvider(name, &Providers[i]); + mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::MAIN_BUFFER, + (void *)outputAddr); + mixer->setParameter( + name, + AudioMixer::TRACK, + AudioMixer::MIXER_FORMAT, + (void *)(uintptr_t)mixerFormat); + mixer->setParameter( + name, + AudioMixer::TRACK, + AudioMixer::FORMAT, + (void *)(uintptr_t)inputFormat); + mixer->setParameter( + name, + AudioMixer::TRACK, + AudioMixer::MIXER_CHANNEL_MASK, + (void *)(uintptr_t)outputChannelMask); + mixer->setParameter( + name, + AudioMixer::TRACK, + AudioMixer::CHANNEL_MASK, + (void *)(uintptr_t)channelMask); + mixer->setParameter( + name, + AudioMixer::RESAMPLE, + AudioMixer::SAMPLE_RATE, + (void *)(uintptr_t)Providers[i].getSampleRate()); + if (useRamp) { + mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME0, &f0); + mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME1, &f0); + mixer->setParameter(name, AudioMixer::RAMP_VOLUME, AudioMixer::VOLUME0, &f); + mixer->setParameter(name, AudioMixer::RAMP_VOLUME, AudioMixer::VOLUME1, &f); + } else { + mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME0, &f); + mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME1, &f); + } + if (auxFilename) { + mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::AUX_BUFFER, + (void *) auxAddr); + mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::AUXLEVEL, &f0); + mixer->setParameter(name, AudioMixer::RAMP_VOLUME, AudioMixer::AUXLEVEL, &f); + } + mixer->enable(name); + } + + // pump the mixer to process data. 
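// [Worked example, illustrative] With the defaults used in this test (48 kHz output,
// stereo, int16_t mixer format) each process() pass below covers
//     320 frames / 48000 Hz ~ 6.7 ms of audio
// and the MAIN_BUFFER pointer advances by
//     320 frames * 2 channels * 2 bytes = 1280 bytes,
// i.e. exactly the i * outputFrameSize offset applied in the loop.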
+ size_t i; + for (i = 0; i < outputFrames - mixerFrameCount; i += mixerFrameCount) { + for (size_t j = 0; j < Names.size(); ++j) { + mixer->setParameter(Names[j], AudioMixer::TRACK, AudioMixer::MAIN_BUFFER, + (char *) outputAddr + i * outputFrameSize); + if (auxFilename) { + mixer->setParameter(Names[j], AudioMixer::TRACK, AudioMixer::AUX_BUFFER, + (char *) auxAddr + i * auxFrameSize); + } + } + mixer->process(AudioBufferProvider::kInvalidPTS); + } + outputFrames = i; // reset output frames to the data actually produced. + + // write to files + writeFile(outputFilename, outputAddr, + outputSampleRate, outputChannels, outputFrames, useMixerFloat); + if (auxFilename) { + // Aux buffer is always in q4_27 format for now. + // memcpy_to_i16_from_q4_27(), but with stereo frame count (not sample count) + ditherAndClamp((int32_t*)auxAddr, (int32_t*)auxAddr, outputFrames >> 1); + writeFile(auxFilename, auxAddr, outputSampleRate, 1, outputFrames, false); + } + + delete mixer; + free(outputAddr); + free(auxAddr); + return EXIT_SUCCESS; +} diff --git a/multimedia/02_day/test-mixer/test_utils.h b/multimedia/02_day/test-mixer/test_utils.h new file mode 100644 index 0000000..3d51cdc --- /dev/null +++ b/multimedia/02_day/test-mixer/test_utils.h @@ -0,0 +1,307 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ANDROID_AUDIO_TEST_UTILS_H +#define ANDROID_AUDIO_TEST_UTILS_H + +#include + +#ifndef ARRAY_SIZE +#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0])) +#endif + +template +struct is_same +{ + static const bool value = false; +}; + +template +struct is_same // partial specialization +{ + static const bool value = true; +}; + +template +static inline T convertValue(double val) +{ + if (is_same::value) { + return floor(val * 32767.0 + 0.5); + } else if (is_same::value) { + return floor(val * (1UL<<31) + 0.5); + } + return val; // assume float or double +} + +// Convert a list of integers in CSV format to a Vector of those values. +// Returns the number of elements in the list, or -1 on error. +static inline int parseCSV(const char *string, std::vector& values) +{ + // pass 1: count the number of values and do syntax check + size_t numValues = 0; + bool hadDigit = false; + for (const char *p = string; ; ) { + switch (*p++) { + case '0': case '1': case '2': case '3': case '4': + case '5': case '6': case '7': case '8': case '9': + hadDigit = true; + break; + case '\0': + if (hadDigit) { + // pass 2: allocate and initialize vector of values + values.resize(++numValues); + values[0] = atoi(p = string); + for (size_t i = 1; i < numValues; ) { + if (*p++ == ',') { + values[i++] = atoi(p); + } + } + return numValues; + } + // fall through + case ',': + if (hadDigit) { + hadDigit = false; + numValues++; + break; + } + // fall through + default: + return -1; + } + } +} + +/* Creates a type-independent audio buffer provider from + * a buffer base address, size, framesize, and input increment array. 
+ * + * No allocation or deallocation of the provided buffer is done. + */ +class TestProvider : public android::AudioBufferProvider { +public: + TestProvider(void* addr, size_t frames, size_t frameSize, + const std::vector& inputIncr) + : mAddr(addr), + mNumFrames(frames), + mFrameSize(frameSize), + mNextFrame(0), mUnrel(0), mInputIncr(inputIncr), mNextIdx(0) + { + } + + TestProvider() + : mAddr(NULL), mNumFrames(0), mFrameSize(0), + mNextFrame(0), mUnrel(0), mNextIdx(0) + { + } + + void setIncr(const std::vector& inputIncr) { + mInputIncr = inputIncr; + mNextIdx = 0; + } + + virtual android::status_t getNextBuffer(Buffer* buffer, int64_t pts __unused = kInvalidPTS) + { + size_t requestedFrames = buffer->frameCount; + if (requestedFrames > mNumFrames - mNextFrame) { + buffer->frameCount = mNumFrames - mNextFrame; + } + if (!mInputIncr.empty()) { + size_t provided = mInputIncr[mNextIdx++]; + ALOGV("getNextBuffer() mValue[%zu]=%zu not %zu", + mNextIdx-1, provided, buffer->frameCount); + if (provided < buffer->frameCount) { + buffer->frameCount = provided; + } + if (mNextIdx >= mInputIncr.size()) { + mNextIdx = 0; + } + } + ALOGV("getNextBuffer() requested %zu frames out of %zu frames available" + " and returned %zu frames", + requestedFrames, mNumFrames - mNextFrame, buffer->frameCount); + mUnrel = buffer->frameCount; + if (buffer->frameCount > 0) { + buffer->raw = (char *)mAddr + mFrameSize * mNextFrame; + return android::NO_ERROR; + } else { + buffer->raw = NULL; + return android::NOT_ENOUGH_DATA; + } + } + + virtual void releaseBuffer(Buffer* buffer) + { + if (buffer->frameCount > mUnrel) { + ALOGE("releaseBuffer() released %zu frames but only %zu available " + "to release", buffer->frameCount, mUnrel); + mNextFrame += mUnrel; + mUnrel = 0; + } else { + + ALOGV("releaseBuffer() released %zu frames out of %zu frames available " + "to release", buffer->frameCount, mUnrel); + mNextFrame += buffer->frameCount; + mUnrel -= buffer->frameCount; + } + buffer->frameCount = 0; + buffer->raw = NULL; + } + + void reset() + { + mNextFrame = 0; + } + + size_t getNumFrames() + { + return mNumFrames; + } + + +protected: + void* mAddr; // base address + size_t mNumFrames; // total frames + int mFrameSize; // frame size (# channels * bytes per sample) + size_t mNextFrame; // index of next frame to provide + size_t mUnrel; // number of frames not yet released + std::vector mInputIncr; // number of frames provided per call + size_t mNextIdx; // index of next entry in mInputIncr to use +}; + +/* Creates a buffer filled with a sine wave. + */ +template +static void createSine(void *vbuffer, size_t frames, + size_t channels, double sampleRate, double freq) +{ + double tscale = 1. / sampleRate; + T* buffer = reinterpret_cast(vbuffer); + for (size_t i = 0; i < frames; ++i) { + double t = i * tscale; + double y = sin(2. * M_PI * freq * t); + T yt = convertValue(y); + + for (size_t j = 0; j < channels; ++j) { + buffer[i*channels + j] = yt / T(j + 1); + } + } +} + +/* Creates a buffer filled with a chirp signal (a sine wave sweep). + * + * When creating the Chirp, note that the frequency is the true sinusoidal + * frequency not the sampling rate. + * + * http://en.wikipedia.org/wiki/Chirp + */ +template +static void createChirp(void *vbuffer, size_t frames, + size_t channels, double sampleRate, double minfreq, double maxfreq) +{ + double tscale = 1. / sampleRate; + T *buffer = reinterpret_cast(vbuffer); + // note the chirp constant k has a divide-by-two. + double k = (maxfreq - minfreq) / (2. 
* tscale * frames); + for (size_t i = 0; i < frames; ++i) { + double t = i * tscale; + double y = sin(2. * M_PI * (k * t + minfreq) * t); + T yt = convertValue(y); + + for (size_t j = 0; j < channels; ++j) { + buffer[i*channels + j] = yt / T(j + 1); + } + } +} + +/* This derived class creates a buffer provider of datatype T, + * consisting of an input signal, e.g. from createChirp(). + * The number of frames can be obtained from the base class + * TestProvider::getNumFrames(). + */ + +class SignalProvider : public TestProvider { +public: + SignalProvider() + : mSampleRate(0), + mChannels(0) + { + } + + virtual ~SignalProvider() + { + free(mAddr); + mAddr = NULL; + } + + template + void setChirp(size_t channels, double minfreq, double maxfreq, double sampleRate, double time) + { + createBufferByFrames(channels, sampleRate, sampleRate*time); + createChirp(mAddr, mNumFrames, mChannels, mSampleRate, minfreq, maxfreq); + } + + template + void setSine(size_t channels, + double freq, double sampleRate, double time) + { + createBufferByFrames(channels, sampleRate, sampleRate*time); + createSine(mAddr, mNumFrames, mChannels, mSampleRate, freq); + } + + template + void setFile(const char *file_in) + { + SF_INFO info; + info.format = 0; + SNDFILE *sf = sf_open(file_in, SFM_READ, &info); + if (sf == NULL) { + perror(file_in); + return; + } + createBufferByFrames(info.channels, info.samplerate, info.frames); + if (is_same::value) { + (void) sf_readf_float(sf, (float *) mAddr, mNumFrames); + } else if (is_same::value) { + (void) sf_readf_short(sf, (short *) mAddr, mNumFrames); + } + sf_close(sf); + } + + template + void createBufferByFrames(size_t channels, uint32_t sampleRate, size_t frames) + { + mNumFrames = frames; + mChannels = channels; + mFrameSize = mChannels * sizeof(T); + free(mAddr); + mAddr = malloc(mFrameSize * mNumFrames); + mSampleRate = sampleRate; + } + + uint32_t getSampleRate() const { + return mSampleRate; + } + + uint32_t getNumChannels() const { + return mChannels; + } + +protected: + uint32_t mSampleRate; + uint32_t mChannels; +}; + +#endif // ANDROID_AUDIO_TEST_UTILS_H diff --git a/multimedia/02_day/test-resample/Android.mk b/multimedia/02_day/test-resample/Android.mk new file mode 100644 index 0000000..5d9ca4f --- /dev/null +++ b/multimedia/02_day/test-resample/Android.mk @@ -0,0 +1,27 @@ +LOCAL_PATH:= $(call my-dir) + + +include $(CLEAR_VARS) + +LOCAL_SRC_FILES:= \ + test-resample.cpp \ + +LOCAL_C_INCLUDES := \ + $(call include-path-for, audio-utils) + +LOCAL_STATIC_LIBRARIES := \ + libsndfile + +LOCAL_SHARED_LIBRARIES := \ + libaudioresampler \ + libaudioutils \ + libdl \ + libcutils \ + libutils \ + liblog + +LOCAL_MODULE:= test-resample + +LOCAL_MODULE_TAGS := optional + +include $(BUILD_EXECUTABLE) diff --git a/multimedia/02_day/test-resample/AudioResampler.h b/multimedia/02_day/test-resample/AudioResampler.h new file mode 100644 index 0000000..cdc6d92 --- /dev/null +++ b/multimedia/02_day/test-resample/AudioResampler.h @@ -0,0 +1,174 @@ +/* + * Copyright (C) 2007 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ANDROID_AUDIO_RESAMPLER_H +#define ANDROID_AUDIO_RESAMPLER_H + +#include +#include +#include + +#include +#include + +namespace android { +// ---------------------------------------------------------------------------- + +class ANDROID_API AudioResampler { +public: + // Determines quality of SRC. + // LOW_QUALITY: linear interpolator (1st order) + // MED_QUALITY: cubic interpolator (3rd order) + // HIGH_QUALITY: fixed multi-tap FIR (e.g. 48KHz->44.1KHz) + // NOTE: high quality SRC will only be supported for + // certain fixed rate conversions. Sample rate cannot be + // changed dynamically. + enum src_quality { + DEFAULT_QUALITY=0, + LOW_QUALITY=1, + MED_QUALITY=2, + HIGH_QUALITY=3, + VERY_HIGH_QUALITY=4, + DYN_LOW_QUALITY=5, + DYN_MED_QUALITY=6, + DYN_HIGH_QUALITY=7, + }; + + static const float UNITY_GAIN_FLOAT = 1.0f; + + static AudioResampler* create(audio_format_t format, int inChannelCount, + int32_t sampleRate, src_quality quality=DEFAULT_QUALITY); + + virtual ~AudioResampler(); + + virtual void init() = 0; + virtual void setSampleRate(int32_t inSampleRate); + virtual void setVolume(float left, float right); + virtual void setLocalTimeFreq(uint64_t freq); + + // set the PTS of the next buffer output by the resampler + virtual void setPTS(int64_t pts); + + // Resample int16_t samples from provider and accumulate into 'out'. + // A mono provider delivers a sequence of samples. + // A stereo provider delivers a sequence of interleaved pairs of samples. + // Multi-channel providers are not supported. + // In either case, 'out' holds interleaved pairs of fixed-point Q4.27. + // That is, for a mono provider, there is an implicit up-channeling. + // Since this method accumulates, the caller is responsible for clearing 'out' initially. + // FIXME assumes provider is always successful; it should return the actual frame count. + virtual void resample(int32_t* out, size_t outFrameCount, + AudioBufferProvider* provider) = 0; + + virtual void reset(); + virtual size_t getUnreleasedFrames() const { return mInputIndex; } + + // called from destructor, so must not be virtual + src_quality getQuality() const { return mQuality; } + +protected: + // number of bits for phase fraction - 30 bits allows nearly 2x downsampling + static const int kNumPhaseBits = 30; + + // phase mask for fraction + static const uint32_t kPhaseMask = (1LU<(outFrameCount)*mInSampleRate + (mSampleRate - 1))/mSampleRate; + // + // The double precision equivalent (float may not be precise enough): + // ceil(static_cast(outFrameCount) * mInSampleRate / mSampleRate); + // + // this relies on the fact that the mPhaseIncrement is rounded down from + // #phases * mInSampleRate/mSampleRate and the fact that Sum(Floor(x)) <= Floor(Sum(x)). + // http://www.proofwiki.org/wiki/Sum_of_Floors_Not_Greater_Than_Floor_of_Sums + // + // (so long as double precision is computed accurately enough to be considered + // greater than or equal to the Floor(x) value in int32_t arithmetic; thus this + // will not necessarily hold for floats). + // + // TODO: + // Greater accuracy and a tight bound is obtained by: + // 1) subtract and adjust for the current state of the AudioBufferProvider buffer. 
+ // 2) using the exact integer formula where (ignoring 64b casting) + // inFrameCount = (mPhaseIncrement * (outFrameCount - 1) + mPhaseFraction) / phaseWrapLimit; + // phaseWrapLimit is the wraparound (1 << kNumPhaseBits), if not specified explicitly. + // + inline size_t getInFrameCountRequired(size_t outFrameCount) { + return (static_cast(outFrameCount)*mInSampleRate + + (mSampleRate - 1))/mSampleRate; + } + + inline float clampFloatVol(float volume) { + if (volume > UNITY_GAIN_FLOAT) { + return UNITY_GAIN_FLOAT; + } else if (volume >= 0.) { + return volume; + } + return 0.; // NaN or negative volume maps to 0. + } + +private: + const src_quality mQuality; + + // Return 'true' if the quality level is supported without explicit request + static bool qualityIsSupported(src_quality quality); + + // For pthread_once() + static void init_routine(); + + // Return the estimated CPU load for specific resampler in MHz. + // The absolute number is irrelevant, it's the relative values that matter. + static uint32_t qualityMHz(src_quality quality); +}; + +// ---------------------------------------------------------------------------- +} +; // namespace android + +#endif // ANDROID_AUDIO_RESAMPLER_H diff --git a/multimedia/02_day/test-resample/test-resample.cpp b/multimedia/02_day/test-resample/test-resample.cpp new file mode 100644 index 0000000..84a655a --- /dev/null +++ b/multimedia/02_day/test-resample/test-resample.cpp @@ -0,0 +1,509 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "AudioResampler.h" + +using namespace android; + +static bool gVerbose = false; + +static int usage(const char* name) { + fprintf(stderr,"Usage: %s [-p] [-f] [-F] [-v] [-c channels]" + " [-q {dq|lq|mq|hq|vhq|dlq|dmq|dhq}]" + " [-i input-sample-rate] [-o output-sample-rate]" + " [-O csv] [-P csv] []" + " \n", name); + fprintf(stderr," -p enable profiling\n"); + fprintf(stderr," -f enable filter profiling\n"); + fprintf(stderr," -F enable floating point -q {dlq|dmq|dhq} only"); + fprintf(stderr," -v verbose : log buffer provider calls\n"); + fprintf(stderr," -c # channels (1-2 for lq|mq|hq; 1-8 for dlq|dmq|dhq)\n"); + fprintf(stderr," -q resampler quality\n"); + fprintf(stderr," dq : default quality\n"); + fprintf(stderr," lq : low quality\n"); + fprintf(stderr," mq : medium quality\n"); + fprintf(stderr," hq : high quality\n"); + fprintf(stderr," vhq : very high quality\n"); + fprintf(stderr," dlq : dynamic low quality\n"); + fprintf(stderr," dmq : dynamic medium quality\n"); + fprintf(stderr," dhq : dynamic high quality\n"); + fprintf(stderr," -i input file sample rate (ignored if input file is specified)\n"); + fprintf(stderr," -o output file sample rate\n"); + fprintf(stderr," -O # frames output per call to resample() in CSV format\n"); + fprintf(stderr," -P # frames provided per call to resample() in CSV format\n"); + return -1; +} + +// Convert a list of integers in CSV format to a Vector of those values. +// Returns the number of elements in the list, or -1 on error. +int parseCSV(const char *string, Vector& values) +{ + // pass 1: count the number of values and do syntax check + size_t numValues = 0; + bool hadDigit = false; + for (const char *p = string; ; ) { + switch (*p++) { + case '0': case '1': case '2': case '3': case '4': + case '5': case '6': case '7': case '8': case '9': + hadDigit = true; + break; + case '\0': + if (hadDigit) { + // pass 2: allocate and initialize vector of values + values.resize(++numValues); + values.editItemAt(0) = atoi(p = optarg); + for (size_t i = 1; i < numValues; ) { + if (*p++ == ',') { + values.editItemAt(i++) = atoi(p); + } + } + return numValues; + } + // fall through + case ',': + if (hadDigit) { + hadDigit = false; + numValues++; + break; + } + // fall through + default: + return -1; + } + } +} + +int main(int argc, char* argv[]) { + const char* const progname = argv[0]; + bool profileResample = false; + bool profileFilter = false; + bool useFloat = false; + int channels = 1; + int input_freq = 0; + int output_freq = 0; + AudioResampler::src_quality quality = AudioResampler::DEFAULT_QUALITY; + Vector Ovalues; + Vector Pvalues; + + int ch; + while ((ch = getopt(argc, argv, "pfFvc:q:i:o:O:P:")) != -1) { + switch (ch) { + case 'p': + profileResample = true; + break; + case 'f': + profileFilter = true; + break; + case 'F': + useFloat = true; + break; + case 'v': + gVerbose = true; + break; + case 'c': + channels = atoi(optarg); + break; + case 'q': + if (!strcmp(optarg, "dq")) + quality = AudioResampler::DEFAULT_QUALITY; + else if (!strcmp(optarg, "lq")) + quality = AudioResampler::LOW_QUALITY; + else if (!strcmp(optarg, "mq")) + quality = AudioResampler::MED_QUALITY; + else if (!strcmp(optarg, "hq")) + quality = AudioResampler::HIGH_QUALITY; + else if (!strcmp(optarg, "vhq")) + quality = AudioResampler::VERY_HIGH_QUALITY; + else if (!strcmp(optarg, "dlq")) + quality = 
AudioResampler::DYN_LOW_QUALITY; + else if (!strcmp(optarg, "dmq")) + quality = AudioResampler::DYN_MED_QUALITY; + else if (!strcmp(optarg, "dhq")) + quality = AudioResampler::DYN_HIGH_QUALITY; + else { + usage(progname); + return -1; + } + break; + case 'i': + input_freq = atoi(optarg); + break; + case 'o': + output_freq = atoi(optarg); + break; + case 'O': + if (parseCSV(optarg, Ovalues) < 0) { + fprintf(stderr, "incorrect syntax for -O option\n"); + return -1; + } + break; + case 'P': + if (parseCSV(optarg, Pvalues) < 0) { + fprintf(stderr, "incorrect syntax for -P option\n"); + return -1; + } + break; + case '?': + default: + usage(progname); + return -1; + } + } + + if (channels < 1 + || channels > (quality < AudioResampler::DYN_LOW_QUALITY ? 2 : 8)) { + fprintf(stderr, "invalid number of audio channels %d\n", channels); + return -1; + } + if (useFloat && quality < AudioResampler::DYN_LOW_QUALITY) { + fprintf(stderr, "float processing is only possible for dynamic resamplers\n"); + return -1; + } + + argc -= optind; + argv += optind; + + const char* file_in = NULL; + const char* file_out = NULL; + if (argc == 1) { + file_out = argv[0]; + } else if (argc == 2) { + file_in = argv[0]; + file_out = argv[1]; + } else { + usage(progname); + return -1; + } + + // ---------------------------------------------------------- + + size_t input_size; + void* input_vaddr; + if (argc == 2) { + SF_INFO info; + info.format = 0; + SNDFILE *sf = sf_open(file_in, SFM_READ, &info); + if (sf == NULL) { + perror(file_in); + return EXIT_FAILURE; + } + input_size = info.frames * info.channels * sizeof(short); + input_vaddr = malloc(input_size); + (void) sf_readf_short(sf, (short *) input_vaddr, info.frames); + sf_close(sf); + channels = info.channels; + input_freq = info.samplerate; + } else { + // data for testing is exactly (input sampling rate/1000)/2 seconds + // so 44.1khz input is 22.05 seconds + double k = 1000; // Hz / s + double time = (input_freq / 2) / k; + size_t input_frames = size_t(input_freq * time); + input_size = channels * sizeof(int16_t) * input_frames; + input_vaddr = malloc(input_size); + int16_t* in = (int16_t*)input_vaddr; + for (size_t i=0 ; i(new_vaddr), + reinterpret_cast(input_vaddr), input_frames * channels); + free(input_vaddr); + input_vaddr = new_vaddr; + } + + // ---------------------------------------------------------- + + class Provider: public AudioBufferProvider { + const void* mAddr; // base address + const size_t mNumFrames; // total frames + const size_t mFrameSize; // size of each frame in bytes + size_t mNextFrame; // index of next frame to provide + size_t mUnrel; // number of frames not yet released + const Vector mPvalues; // number of frames provided per call + size_t mNextPidx; // index of next entry in mPvalues to use + public: + Provider(const void* addr, size_t frames, size_t frameSize, const Vector& Pvalues) + : mAddr(addr), + mNumFrames(frames), + mFrameSize(frameSize), + mNextFrame(0), mUnrel(0), mPvalues(Pvalues), mNextPidx(0) { + } + virtual status_t getNextBuffer(Buffer* buffer, + int64_t pts = kInvalidPTS) { + (void)pts; // suppress warning + size_t requestedFrames = buffer->frameCount; + if (requestedFrames > mNumFrames - mNextFrame) { + buffer->frameCount = mNumFrames - mNextFrame; + } + if (!mPvalues.isEmpty()) { + size_t provided = mPvalues[mNextPidx++]; + printf("mPvalue[%zu]=%zu not %zu\n", mNextPidx-1, provided, buffer->frameCount); + if (provided < buffer->frameCount) { + buffer->frameCount = provided; + } + if (mNextPidx >= mPvalues.size()) { + 
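+                    // Wrap back to the first -P value so the CSV pattern of
+                    // per-call frame counts repeats for the rest of the input.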
mNextPidx = 0; + } + } + if (gVerbose) { + printf("getNextBuffer() requested %zu frames out of %zu frames available," + " and returned %zu frames\n", + requestedFrames, (size_t) (mNumFrames - mNextFrame), buffer->frameCount); + } + mUnrel = buffer->frameCount; + if (buffer->frameCount > 0) { + buffer->raw = (char *)mAddr + mFrameSize * mNextFrame; + return NO_ERROR; + } else { + buffer->raw = NULL; + return NOT_ENOUGH_DATA; + } + } + virtual void releaseBuffer(Buffer* buffer) { + if (buffer->frameCount > mUnrel) { + fprintf(stderr, "ERROR releaseBuffer() released %zu frames but only %zu available " + "to release\n", buffer->frameCount, mUnrel); + mNextFrame += mUnrel; + mUnrel = 0; + } else { + if (gVerbose) { + printf("releaseBuffer() released %zu frames out of %zu frames available " + "to release\n", buffer->frameCount, mUnrel); + } + mNextFrame += buffer->frameCount; + mUnrel -= buffer->frameCount; + } + buffer->frameCount = 0; + buffer->raw = NULL; + } + void reset() { + mNextFrame = 0; + } + } provider(input_vaddr, input_frames, input_framesize, Pvalues); + + if (gVerbose) { + printf("%zu input frames\n", input_frames); + } + + audio_format_t format = useFloat ? AUDIO_FORMAT_PCM_FLOAT : AUDIO_FORMAT_PCM_16_BIT; + int output_channels = channels > 2 ? channels : 2; // output is at least stereo samples + size_t output_framesize = output_channels * (useFloat ? sizeof(float) : sizeof(int32_t)); + size_t output_frames = ((int64_t) input_frames * output_freq) / input_freq; + size_t output_size = output_frames * output_framesize; + + if (profileFilter) { + // Check how fast sample rate changes are that require filter changes. + // The delta sample rate changes must indicate a downsampling ratio, + // and must be larger than 10% changes. + // + // On fast devices, filters should be generated between 0.1ms - 1ms. + // (single threaded). + AudioResampler* resampler = AudioResampler::create(format, channels, + 8000, quality); + int looplimit = 100; + timespec start, end; + clock_gettime(CLOCK_MONOTONIC, &start); + for (int i = 0; i < looplimit; ++i) { + resampler->setSampleRate(9000); + resampler->setSampleRate(12000); + resampler->setSampleRate(20000); + resampler->setSampleRate(30000); + } + clock_gettime(CLOCK_MONOTONIC, &end); + int64_t start_ns = start.tv_sec * 1000000000LL + start.tv_nsec; + int64_t end_ns = end.tv_sec * 1000000000LL + end.tv_nsec; + int64_t time = end_ns - start_ns; + printf("%.2f sample rate changes with filter calculation/sec\n", + looplimit * 4 / (time / 1e9)); + + // Check how fast sample rate changes are without filter changes. + // This should be very fast, probably 0.1us - 1us per sample rate + // change. 
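+        // Illustrative arithmetic (assumed timing): if the 1000-iteration loop
+        // below completes in 1 ms, the printf reports 1,000,000 changes/sec,
+        // i.e. about 1 us per setSampleRate() call, in line with the estimate above.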
+ resampler->setSampleRate(1000); + looplimit = 1000; + clock_gettime(CLOCK_MONOTONIC, &start); + for (int i = 0; i < looplimit; ++i) { + resampler->setSampleRate(1000+i); + } + clock_gettime(CLOCK_MONOTONIC, &end); + start_ns = start.tv_sec * 1000000000LL + start.tv_nsec; + end_ns = end.tv_sec * 1000000000LL + end.tv_nsec; + time = end_ns - start_ns; + printf("%.2f sample rate changes without filter calculation/sec\n", + looplimit / (time / 1e9)); + resampler->reset(); + delete resampler; + } + + void* output_vaddr = malloc(output_size); + AudioResampler* resampler = AudioResampler::create(format, channels, + output_freq, quality); + + resampler->setSampleRate(input_freq); + resampler->setVolume(AudioResampler::UNITY_GAIN_FLOAT, AudioResampler::UNITY_GAIN_FLOAT); + + if (profileResample) { + /* + * For profiling on mobile devices, upon experimentation + * it is better to run a few trials with a shorter loop limit, + * and take the minimum time. + * + * Long tests can cause CPU temperature to build up and thermal throttling + * to reduce CPU frequency. + * + * For frequency checks (index=0, or 1, etc.): + * "cat /sys/devices/system/cpu/cpu${index}/cpufreq/scaling_*_freq" + * + * For temperature checks (index=0, or 1, etc.): + * "cat /sys/class/thermal/thermal_zone${index}/temp" + * + * Another way to avoid thermal throttling is to fix the CPU frequency + * at a lower level which prevents excessive temperatures. + */ + const int trials = 4; + const int looplimit = 4; + timespec start, end; + int64_t time = 0; + + for (int n = 0; n < trials; ++n) { + clock_gettime(CLOCK_MONOTONIC, &start); + for (int i = 0; i < looplimit; ++i) { + resampler->resample((int*) output_vaddr, output_frames, &provider); + provider.reset(); // during benchmarking reset only the provider + } + clock_gettime(CLOCK_MONOTONIC, &end); + int64_t start_ns = start.tv_sec * 1000000000LL + start.tv_nsec; + int64_t end_ns = end.tv_sec * 1000000000LL + end.tv_nsec; + int64_t diff_ns = end_ns - start_ns; + if (n == 0 || diff_ns < time) { + time = diff_ns; // save the best out of our trials. + } + } + // Mfrms/s is "Millions of output frames per second". + printf("quality: %d channels: %d msec: %" PRId64 " Mfrms/s: %.2lf\n", + quality, channels, time/1000000, output_frames * looplimit / (time / 1e9) / 1e6); + resampler->reset(); + } + + memset(output_vaddr, 0, output_size); + if (gVerbose) { + printf("resample() %zu output frames\n", output_frames); + } + if (Ovalues.isEmpty()) { + Ovalues.push(output_frames); + } + for (size_t i = 0, j = 0; i < output_frames; ) { + size_t thisFrames = Ovalues[j++]; + if (j >= Ovalues.size()) { + j = 0; + } + if (thisFrames == 0 || thisFrames > output_frames - i) { + thisFrames = output_frames - i; + } + resampler->resample((int*) output_vaddr + output_channels*i, thisFrames, &provider); + i += thisFrames; + } + if (gVerbose) { + printf("resample() complete\n"); + } + resampler->reset(); + if (gVerbose) { + printf("reset() complete\n"); + } + delete resampler; + resampler = NULL; + + // For float processing, convert output format from float to Q4.27, + // which is then converted to int16_t for final storage. + if (useFloat) { + memcpy_to_q4_27_from_float(reinterpret_cast(output_vaddr), + reinterpret_cast(output_vaddr), output_frames * output_channels); + } + + // mono takes left channel only (out of stereo output pair) + // stereo and multichannel preserve all channels. 
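+    // Worked example (illustrative) of the Q4.27 -> Q.15 conversion below:
+    // with volumeShift = 12, roundVal = (1 << 11) - 1 = 2047, so a full-scale
+    // Q4.27 sample 0x08000000 becomes (0x08000000 + 2047) >> 12 = 32768 and is
+    // then saturated to 32767, the int16_t maximum.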
+ int32_t* out = (int32_t*) output_vaddr; + int16_t* convert = (int16_t*) malloc(output_frames * channels * sizeof(int16_t)); + + const int volumeShift = 12; // shift requirement for Q4.27 to Q.15 + // round to half towards zero and saturate at int16 (non-dithered) + const int roundVal = (1<<(volumeShift-1)) - 1; // volumePrecision > 0 + + for (size_t i = 0; i < output_frames; i++) { + for (int j = 0; j < channels; j++) { + int32_t s = out[i * output_channels + j] + roundVal; // add offset here + if (s < 0) { + s = (s + 1) >> volumeShift; // round to 0 + if (s < -32768) { + s = -32768; + } + } else { + s = s >> volumeShift; + if (s > 32767) { + s = 32767; + } + } + convert[i * channels + j] = int16_t(s); + } + } + + // write output to disk + SF_INFO info; + info.frames = 0; + info.samplerate = output_freq; + info.channels = channels; + info.format = SF_FORMAT_WAV | SF_FORMAT_PCM_16; + SNDFILE *sf = sf_open(file_out, SFM_WRITE, &info); + if (sf == NULL) { + perror(file_out); + return EXIT_FAILURE; + } + (void) sf_writef_short(sf, convert, output_frames); + sf_close(sf); + + return EXIT_SUCCESS; +} diff --git a/multimedia/02_day/test-thread/Android.mk b/multimedia/02_day/test-thread/Android.mk new file mode 100644 index 0000000..7402d8a --- /dev/null +++ b/multimedia/02_day/test-thread/Android.mk @@ -0,0 +1,8 @@ +LOCAL_PATH:= $(call my-dir) +include $(CLEAR_VARS) +LOCAL_SRC_FILES:= thread.cpp +LOCAL_MODULE := my_thread +LOCAL_SHARED_LIBRARIES:= libcutils libutils libbinder +LOCAL_MODULE_TAGS := optional +include $(BUILD_EXECUTABLE) + diff --git a/multimedia/02_day/test-thread/thread.cpp b/multimedia/02_day/test-thread/thread.cpp new file mode 100644 index 0000000..efaafdc --- /dev/null +++ b/multimedia/02_day/test-thread/thread.cpp @@ -0,0 +1,27 @@ +#include +#include +#include +#include +#include + +using namespace android; + + +class MyThread : public Thread +{ + public : + bool threadLoop() + { + printf("MyThread::threadLoop()\n"); + sleep(1); + return true; + } +}; + +int main() +{ + sp thread = new MyThread; + thread->run(); + getchar(); + return 0; +} diff --git a/multimedia/03_day/test-bitmap/Android.mk b/multimedia/03_day/test-bitmap/Android.mk new file mode 100644 index 0000000..f20a994 --- /dev/null +++ b/multimedia/03_day/test-bitmap/Android.mk @@ -0,0 +1,14 @@ +LOCAL_PATH:= $(call my-dir) +include $(CLEAR_VARS) + +LOCAL_SRC_FILES:= test-bitmap.cpp + +LOCAL_SHARED_LIBRARIES := libutils libgui libui + +LOCAL_MODULE:= test-bitmap +LOCAL_CFLAGS:= -g -O0 + +LOCAL_MODULE_TAGS := optional + +include $(BUILD_EXECUTABLE) + diff --git a/multimedia/03_day/test-bitmap/bbb.bmp b/multimedia/03_day/test-bitmap/bbb.bmp new file mode 100644 index 0000000..5d77e2c Binary files /dev/null and b/multimedia/03_day/test-bitmap/bbb.bmp differ diff --git a/multimedia/03_day/test-bitmap/test-bitmap.cpp b/multimedia/03_day/test-bitmap/test-bitmap.cpp new file mode 100644 index 0000000..faa3ebe --- /dev/null +++ b/multimedia/03_day/test-bitmap/test-bitmap.cpp @@ -0,0 +1,133 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +using namespace android; + +typedef unsigned char u8; +typedef unsigned short u16; +typedef unsigned int u32; + +struct BITMAPFILEHEADER { + u16 bfType; + u32 bfSize; + u16 bfReserved1; + u16 bfReserved2; + u32 bfOffBits; +} __attribute((packed)); + +struct BITMAPINFOHEADER { + u32 biSize; + u32 biWidth; + u32 
biHeight; + u16 biPlanes; + u16 biBitCount; + u32 biCompression; + u32 biSizeImage; + u32 biXPelsPerMeter; + u32 biYPelsPerMeter; + u32 biClrUsed; + u32 biClrImportant; +} __attribute((packed)); + + +void fillRGBA8BMP(uint8_t* dst, uint8_t* src, int w, int h, int stride, int bpp) +{ + const size_t PIXEL_SIZE = 4; + src = src+w*h*bpp; + for (int y = 0; y < h; y++) { + src -= w*bpp; + for (int x = 0; x < w; x++) { + off_t offset = (y * stride + x) * PIXEL_SIZE; + dst[offset + 0] = src[x*bpp+2]; + dst[offset + 1] = src[x*bpp+1]; + dst[offset + 2] = src[x*bpp+0]; + dst[offset + 3] = 255; + } + } +} + +int main() +{ + int fd; + int w,h; + BITMAPFILEHEADER file_header; + BITMAPINFOHEADER DIB; + + fd = open("bbb.bmp", O_RDONLY); + printf("fd=%d\n", fd ); + read( fd, &file_header, sizeof file_header ); + read( fd, &DIB, sizeof DIB ); + w = DIB.biWidth; + h = DIB.biHeight; + printf("size=%u\n", file_header.bfSize ); + printf("BitCount=%u\n", DIB.biBitCount ); + int bpp = DIB.biBitCount/8; + uint8_t *src = (uint8_t*)malloc( DIB.biSizeImage*bpp ); + read( fd, src, DIB.biSizeImage*bpp ); + sp gsf; + status_t nState; + + printf("create SurfaceComposerClient\n"); + + sp composerClient; + sp control; + + composerClient = new SurfaceComposerClient; + composerClient->initCheck(); + + printf("create video surface\n"); + + control = composerClient->createSurface( + String8("A Surface"), + w, + h, + PIXEL_FORMAT_RGBA_8888, + 0); + + SurfaceComposerClient::openGlobalTransaction(); + control->setLayer(INT_MAX); + control->show(); + SurfaceComposerClient::closeGlobalTransaction(); + + + sp window; + window = control->getSurface(); + + ANativeWindowBuffer *anb; + native_window_dequeue_buffer_and_wait(window.get(), &anb); + + sp buf(new GraphicBuffer(anb, false)); + + uint8_t* img = NULL; + buf->lock(GRALLOC_USAGE_SW_WRITE_OFTEN, (void**)(&img)); + printf("width=%d, height=%d, stride=%d\n", + buf->getWidth(), buf->getHeight(), buf->getStride()); + + fillRGBA8BMP(img, src, w, h, buf->getStride(), bpp); + buf->unlock(); + window->queueBuffer(window.get(), buf->getNativeBuffer(), -1); + getchar(); + + free(src); + close(fd); +} diff --git a/multimedia/03_day/test-looper/Android.mk b/multimedia/03_day/test-looper/Android.mk new file mode 100644 index 0000000..6e1b0a4 --- /dev/null +++ b/multimedia/03_day/test-looper/Android.mk @@ -0,0 +1,9 @@ +LOCAL_PATH:= $(call my-dir) + +include $(CLEAR_VARS) +LOCAL_SHARED_LIBRARIES := liblog libutils libmedia +LOCAL_SRC_FILES := my_looper.cpp +LOCAL_MODULE := my_looper +include $(BUILD_EXECUTABLE) + + diff --git a/multimedia/03_day/test-looper/my_looper.cpp b/multimedia/03_day/test-looper/my_looper.cpp new file mode 100644 index 0000000..e060e6d --- /dev/null +++ b/multimedia/03_day/test-looper/my_looper.cpp @@ -0,0 +1,263 @@ +#if 1 +#include +#include +#include + +using namespace android; + +class MyThread : public Thread { + sp mLooper; + sp mHandler; + public : + MyThread( sp &looper , sp &handler ) + : mLooper(looper),mHandler(handler){} + bool threadLoop() { + mLooper->sendMessageDelayed( s2ns(3), mHandler, Message(3) ); + return false; + } +}; + +class MyMessageHandler : public MessageHandler +{ +public: + virtual void handleMessage(const Message& message) + { + printf("MyMessageHandler::handleMessage(%d)\n", message.what ); + } +}; + +int main() +{ + int ret; + sp looper = new Looper(true); + sp handler = new MyMessageHandler; + sp thread = new MyThread( looper, handler ); + + thread->run(); + + while( 1) + { + ret = looper->pollOnce(-1); + + if( ret == Looper::POLL_WAKE ) 
+ printf("Looper::POLL_WAKE\n"); + if( ret == Looper::POLL_CALLBACK ) + printf("Looper::POLL_CALLBACK\n"); + } + return 0; +} +#endif +#if 0 +#include +#include +#include + +using namespace android; + +class MyThread : public Thread { + sp mLooper; + int mFd; + public : + MyThread( sp &looper , int fd ) : mLooper(looper),mFd(fd){} + bool threadLoop() { + sleep(3); + write( mFd, "W", 1 ); + return false; + } +}; + +class CallbackHandler +{ +public: + void setCallback( sp &looper, int fd, int events ) + { + looper->addFd( fd, 0, events, callbackFunc, this ); + } + static int callbackFunc(int fd, int events, void* data) + { + return ((CallbackHandler*)data)->handler( fd, events, data ); + } + virtual ~CallbackHandler(){} + virtual int handler(int fd, int events, void* data) = 0; +}; + +class MyCallback : public CallbackHandler +{ +public: + int handler(int fd, int events, void* data) + { + printf("MyCallback::handler(%d, %d, %p)\n", fd, events, data); + return 0; + } +}; + +int main() +{ + int ret; + int fds[2]; + pipe(fds); + MyCallback callback; + sp looper = new Looper(true); + sp thread = new MyThread( looper, fds[1] ); + + callback.setCallback( looper, fds[0], Looper::EVENT_INPUT ); + thread->run(); + + while( 1) + { + ret = looper->pollOnce(-1); + + if( ret == Looper::POLL_WAKE ) + printf("Looper::POLL_WAKE\n"); + if( ret == Looper::POLL_CALLBACK ) + printf("Looper::POLL_CALLBACK\n"); + } + return 0; +} +#endif +#if 0 +#include +#include +#include + +using namespace android; + +class MyThread : public Thread { + sp mLooper; + int mFd; + public : + MyThread( sp &looper , int fd ) : mLooper(looper),mFd(fd){} + bool threadLoop() { + sleep(3); + write( mFd, "W", 1 ); + return false; + } +}; + +class MyCallback +{ +public: + void setCallback( sp &looper, int fd, int events ) + { + looper->addFd( fd, 0, events, callbackFunc, this ); + } + static int callbackFunc(int fd, int events, void* data) + { + return ((MyCallback*)data)->handler( fd, events, data ); + } + + int handler(int fd, int events, void* data) + { + printf("MyCallback::handler(%d, %d, %p)\n", fd, events, data); + return 0; + } +}; + +int main() +{ + int ret; + int fds[2]; + pipe(fds); + MyCallback callback; + sp looper = new Looper(true); + sp thread = new MyThread( looper, fds[1] ); + + callback.setCallback( looper, fds[0], Looper::EVENT_INPUT ); + thread->run(); + + while( 1) + { + ret = looper->pollOnce(-1); + + if( ret == Looper::POLL_WAKE ) + printf("Looper::POLL_WAKE\n"); + if( ret == Looper::POLL_CALLBACK ) + printf("Looper::POLL_CALLBACK\n"); + } + return 0; +} +#endif +#if 0 +#include +#include +#include + +using namespace android; + +class MyThread : public Thread { + sp mLooper; + int mFd; + public : + MyThread( sp &looper , int fd ) : mLooper(looper),mFd(fd){} + bool threadLoop() { + sleep(3); + write( mFd, "W", 1 ); + return false; + } +}; + + +int foo(int fd, int events, void* data) +{ + printf("foo(%d, %d, %p)\n", fd, events, data); + return 0; +} + +int main() +{ + int ret; + int fds[2]; + pipe(fds); + sp looper = new Looper(true); + sp thread = new MyThread( looper, fds[1] ); + + looper->addFd( fds[0], Looper::POLL_CALLBACK, Looper::EVENT_INPUT, foo, 0 ); + thread->run(); + + while( 1) + { + ret = looper->pollOnce(-1); + + if( ret == Looper::POLL_WAKE ) + printf("Looper::POLL_WAKE\n"); + if( ret == Looper::POLL_CALLBACK ) + printf("Looper::POLL_CALLBACK\n"); + } + return 0; +} +#endif +#if 0 +#include +#include +#include + +using namespace android; + +class MyThread : public Thread { + sp mLooper; + public : + 
MyThread( sp &looper ) : mLooper(looper){} + bool threadLoop() { + sleep(3); + mLooper->wake(); + return false; + } +}; +int main() +{ + int ret; + sp looper = new Looper(true); + sp thread = new MyThread( looper ); + thread->run(); + + while( 1) + { + ret = looper->pollOnce(-1); + + if( ret == Looper::POLL_WAKE ) + printf("Looper::POLL_WAKE\n"); + } + return 0; +} +#endif + diff --git a/multimedia/03_day/test-vsync/Android.mk b/multimedia/03_day/test-vsync/Android.mk new file mode 100644 index 0000000..9181760 --- /dev/null +++ b/multimedia/03_day/test-vsync/Android.mk @@ -0,0 +1,18 @@ +LOCAL_PATH:= $(call my-dir) +include $(CLEAR_VARS) + +LOCAL_SRC_FILES:= \ + vsync.cpp + +LOCAL_SHARED_LIBRARIES := \ + libcutils \ + libutils \ + libbinder \ + libui \ + libgui + +LOCAL_MODULE:= test-vsync-events + +LOCAL_MODULE_TAGS := tests + +include $(BUILD_EXECUTABLE) diff --git a/multimedia/03_day/test-vsync/vsync.cpp b/multimedia/03_day/test-vsync/vsync.cpp new file mode 100644 index 0000000..317f604 --- /dev/null +++ b/multimedia/03_day/test-vsync/vsync.cpp @@ -0,0 +1,68 @@ +#include +#include +#include + +using namespace android; + +int receiver(int fd, int events, void* data) +{ + DisplayEventReceiver* q = (DisplayEventReceiver*)data; + + ssize_t n; + DisplayEventReceiver::Event buffer[1]; + + static nsecs_t oldTimeStamp = 0; + + while ((n = q->getEvents(buffer, 1)) > 0) { + for (int i=0 ; i loop = new Looper(false); + loop->addFd(myDisplayEvent.getFd(), 0, ALOOPER_EVENT_INPUT, receiver, + &myDisplayEvent); + + myDisplayEvent.setVsyncRate(1); + + do { + //printf("about to poll...\n"); + int32_t ret = loop->pollOnce(-1); + switch (ret) { + case ALOOPER_POLL_WAKE: + //("ALOOPER_POLL_WAKE\n"); + break; + case ALOOPER_POLL_CALLBACK: + //("ALOOPER_POLL_CALLBACK\n"); + break; + case ALOOPER_POLL_TIMEOUT: + printf("ALOOPER_POLL_TIMEOUT\n"); + break; + case ALOOPER_POLL_ERROR: + printf("ALOOPER_POLL_TIMEOUT\n"); + break; + default: + printf("ugh? poll returned %d\n", ret); + break; + } + } while (1); + + return 0; +} diff --git a/multimedia/04_day/test-openmax/ATMParser.cpp b/multimedia/04_day/test-openmax/ATMParser.cpp new file mode 100644 index 0000000..316160a --- /dev/null +++ b/multimedia/04_day/test-openmax/ATMParser.cpp @@ -0,0 +1,77 @@ +/* +** +** Copyright 2013, kod21236@gmail.com +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. 
+*/ + +//#define LOG_NDEBUG 0 +#define LOG_TAG "ATMParser" + +#include "ATMParser.h" +#include +#include + +#define LOGD ALOGD + + + +namespace anatomy { + +static char pszSource[] = "\ +M9a1X1M13B1M55/1M9B1Z1a1M11B1W1a1M54/1M8Z1r1X2M10@1r1S1;1Z1M53/1M9Z1X1r1M3\ +W2B1W2M3r1X2M6r1X1M12Z1i1X1M4X1;1X1M24/1M10B1X1S2X7S2X1W1M7.1 1Z1M11X1 2X1M3:1 2\ +M6@1M17/1M9B1S2X2a1S1a1S1a1S1a1S3a1W1M7 1r1:1i1M3S1 2M5 1X1M5 1M4:1 2,1M16/1M8X5\ +S1X1S2a1S2X6a1M6 2S1 1S1M1B1 1W1,1 1M4 1S1M5 1M3Z1 1M2 1S1M15/1M6@1X3Z1B5S2a1B5a\ +1X2S1M5 1S1M1 1X1M1:1 1;1i1 1M4 1S1M5 1M3i1 1M2.1,1M15/1M6X1S2X1@1M5a1X1B1M5B1S2\ +X1a1M4 1W1M1 1Z1M1X1 1M2i1M4 1Z1M5 1M3a1 1M2 1X1M1Z1 1a1M11/1M5X2S1a1S1Z6S2a1Z1B\ +1Z4S1a1S1X1B1M2.1 2Z1 2W1M1 3;1M2X1 4M2 5M2.1 3M2B1 2M11/1M4W1X1S1a1S1a1S2X3S2a1\ +S2X3S2a3S1X1M2B1Z1B1M1B2M3B1@1M3@1Z1@2W1M2W1Z1@1W1@1M3W1B1M3a1 1W1M11/1M2@1B1S2a\ +1S1a1S1a1S3a1S1a1S1a1S1a1S1a1S1a1S1a1S1a1X1Z1M35B1Z1M12/1M1B1r1X2a1S1X20S2a1r1X2\ +Z1M46/1Z1S2a1S2Z1M6@1M5@1M1@1M5@1S2X1a1S1r1B1M45/1X1Z1S1Z1X1S1Z1M21S1a1X1a1S1X2M\ +45/1B1S1a2S2a1X19S1X1a2S1a2X1a1M45/1M1B1r1S1X1a2S3X1S1X1S9X1S6a1X1a1X2@1M45/1M2B\ +1X3a4Z1a18S1r1X1Z1M47/1M4W1X1S1a8Z1a12S1X1Z1M23a1X1M24/1M5S1X1S1a6Z1a13S1X1M4B1W\ +1M11W1B1M5 2M5Z1X1M17/1M5@1X1a1S1a8Z1a7Z1a2X1Z1M2X1 3X1M2 2,1 2M2 4M2r1 4:1M2 4M\ +2 6W2 2S1i1 1.1/1M6Z1X1a10Z1a8X2M3a1:1M1 2M2@1 1i1M1 1M2.1W1r1 1M4 1:1M3B1 1M2 1\ +i1M1i1 1M1 1M1 1X1M1X1 1M2 1M1/1M7a1X1a2Z1a13Z1S1X1W1M3B1 2S1X1M3 1M2 1M2i1 1;1i\ +1M4.1r1M1@1M1X1 1M2i1 1M1X1,1M1 1M1,1r1M2 1S2i1M1/1M8Z1X1a5S1a1S1a6Z1S1X1W1M4 2Z\ +2 2M2 1Z1M1 1W1B1 2M1 2r1M2 2a1 1M1@1 1W1M1 1S1M1r1 1M1 1M1 1i1M3 2M2/1M9X1r1S1a\ +1Z1S3a1S1a1S1a3X3@1M4X1 2i1 1.1M1.1 2r1 2M1 2.1 2W1M2r1 2a1M2X1 2i1M2 2,1 1,1 2M\ +3.1 1M2/1M9X1S1X3S1X9S1a1X1Z1M46B1 2Z1M2/1M9X1S1Z1S1X1M3@3M2X1a2S1r1B1M46W1 1.1M\ +3/1M9@1a1Z3M8W1a1Z1a1Z1M53"; + +ATMParser::ATMParser() + : mOffset(0), + mLength(sizeof(pszSource)) { + +} + +void ATMParser::read(void* buffer, int* length) { + int i = 0; + + if ( mOffset < mLength ) + { + for ( i = mOffset+1; i < mLength; i++ ) + { + if ( !isdigit(pszSource[i]) ) + break; + } + *length = i - mOffset + 1; + memcpy(buffer, &pszSource[mOffset], *length); + mOffset = i; + LOGD("mOffset : %d , mLength : %d, *length : %d", mOffset, mLength, *length); + } +}; + +} + diff --git a/multimedia/04_day/test-openmax/ATMParser.h b/multimedia/04_day/test-openmax/ATMParser.h new file mode 100644 index 0000000..a392b92 --- /dev/null +++ b/multimedia/04_day/test-openmax/ATMParser.h @@ -0,0 +1,35 @@ +/* +** +** Copyright 2013, kod21236@gmail.com +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. 
+*/ +#ifndef ATM_PARSER_H_ +#define ATM_PARSER_H_ + +namespace anatomy { + +class ATMParser { +public: + ATMParser(); + virtual ~ATMParser(){} + void read(void* buffer,int* length); +private: + int mOffset; + int mLength; +}; + +} // namespace anatomy + +#endif // ATM_PARSER_H_ + diff --git a/multimedia/04_day/test-openmax/AnatomyOMXClient.cpp b/multimedia/04_day/test-openmax/AnatomyOMXClient.cpp new file mode 100644 index 0000000..8628ead --- /dev/null +++ b/multimedia/04_day/test-openmax/AnatomyOMXClient.cpp @@ -0,0 +1,263 @@ +/* +** +** Copyright 2013, kod21236@gmail.com +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +*/ + +//#define LOG_NDEBUG 0 +#define LOG_TAG "AnatomyOMXClient" + +#include "AnatomyOMXClient.h" +#include "AnatomyOMXCore.h" + +#include +#include + +#define LOGW ALOGW +#define LOGD ALOGD + + +namespace anatomy { + + +template +static void InitOMXParams(T *params) { + params->nSize = sizeof(T); + params->nVersion.s.nVersionMajor = 1; + params->nVersion.s.nVersionMinor = 0; + params->nVersion.s.nRevision = 0; + params->nVersion.s.nStep = 0; +} + +void AnatomyOMXClient::prepare() { + init(); + + changeState(OMX_StateIdle); + allocateBuffers(); + sleep(1); + + changeState(OMX_StateExecuting); + sleep(1); +} + +void AnatomyOMXClient::stop() { + changeState(OMX_StateIdle); + sleep(1); + changeState(OMX_StateLoaded); + freeBuffers(); + sleep(1); + + deinit(); +} + +void AnatomyOMXClient::init() { + OMX_HANDLETYPE handle = NULL; + char componentName[256]; + OMX_Init(); + OMX_ComponentNameEnum(componentName, 256, 0); + OMX_GetHandle(&handle, componentName, this, &AnatomyOMXClient::kCallbacks); + CHECK(handle != NULL); + + mComponentHandle = handle; +} +void AnatomyOMXClient::changeState(OMX_STATETYPE state) { + OMX_SendCommand(mComponentHandle, OMX_CommandStateSet, state, NULL); +} +void AnatomyOMXClient::allocateBuffers() { + OMX_PARAM_PORTDEFINITIONTYPE def; + InitOMXParams(&def); + def.nPortIndex = kPortIndexInput; + + OMX_GetParameter(mComponentHandle, OMX_IndexParamPortDefinition, &def); + + OMX_BUFFERHEADERTYPE *pInputHeader; + for(uint i = 0; i < def.nBufferCountActual; i++ ) + { + OMX_AllocateBuffer(mComponentHandle, &pInputHeader, 0, this, def.nBufferSize); + addOMXBufferInfo(kPortIndexInput, pInputHeader); + } + + InitOMXParams(&def); + def.nPortIndex = kPortIndexOutput; + + OMX_GetParameter(mComponentHandle, OMX_IndexParamPortDefinition, &def); + OMX_BUFFERHEADERTYPE *pOutputHeader; + for(uint i = 0; i < def.nBufferCountActual; i++ ) + { + OMX_AllocateBuffer(mComponentHandle, &pOutputHeader, 1, this, def.nBufferSize); + addOMXBufferInfo(kPortIndexOutput, pOutputHeader); + } +} + +void AnatomyOMXClient::excute() { + int nSize = 0; + OMX_BUFFERHEADERTYPE *pInputHeader = NULL; + OMX_BUFFERHEADERTYPE *pOutputHeader = NULL; + + for(size_t i = 0; i < mBufferInfo.size(); i++) { + if( mBufferInfo[i].mPortIndex == kPortIndexInput ) { + pInputHeader = mBufferInfo[i].mBufferHeader; + read((char*)(pInputHeader->pBuffer), &nSize); + OMX_EmptyThisBuffer(mComponentHandle, 
pInputHeader); + removeOMXBufferInfo(kPortIndexInput, mBufferInfo[i].mBufferHeader); + } + if( mBufferInfo[i].mPortIndex == kPortIndexOutput ) { + pOutputHeader = mBufferInfo[i].mBufferHeader; + OMX_FillThisBuffer(mComponentHandle, pOutputHeader); + removeOMXBufferInfo(kPortIndexOutput, mBufferInfo[i].mBufferHeader); + } + } +} +void AnatomyOMXClient::deinit() { + OMX_FreeHandle(mComponentHandle); + OMX_Deinit(); +} + +void AnatomyOMXClient::addOMXBufferInfo( + OMX_U32 portIndex, OMX_BUFFERHEADERTYPE* header) { + OMXClientBufferInfo info; + info.mPortIndex = portIndex; + info.mBufferHeader = header; + mBufferInfo.push(info); +} + +void AnatomyOMXClient::removeOMXBufferInfo( + OMX_U32 portIndex, OMX_BUFFERHEADERTYPE* header) { + bool found = false; + for (size_t i = 0; i < mBufferInfo.size(); i++) { + if (mBufferInfo[i].mPortIndex == portIndex + && mBufferInfo[i].mBufferHeader == header) { + found = true; + mBufferInfo.removeItemsAt(i); + break; + } + } + + if (!found) { + LOGW("Attempt to remove an active buffer we know nothing about..."); + } +} + +void AnatomyOMXClient::freeBuffers() { + LOGD("============================================="); + for (size_t i = mBufferInfo.size(); i--;) { + int port_index= mBufferInfo[i].mPortIndex; + OMX_BUFFERHEADERTYPE *header = mBufferInfo[i].mBufferHeader; + LOGD("mBufferInfo[%d] mPortIndex : %d, mBufferHeader %p",i, port_index, header); + removeOMXBufferInfo(port_index, header); + OMX_FreeBuffer(mComponentHandle, port_index, header); + } + LOGD("============================================="); +} + + +// static +OMX_CALLBACKTYPE AnatomyOMXClient::kCallbacks = { + &OnEvent, &OnEmptyBufferDone, &OnFillBufferDone +}; + +OMX_ERRORTYPE AnatomyOMXClient::OnFillBufferDone( + OMX_IN OMX_HANDLETYPE hComponent, + OMX_IN OMX_PTR pAppData, + OMX_IN OMX_BUFFERHEADERTYPE *pBuffer) { + + AnatomyOMXClient* omxclient = reinterpret_cast(pAppData); + CHECK( NULL != omxclient); + CHECK( NULL != pBuffer ); + + omxclient->addOMXBufferInfo(kPortIndexOutput, pBuffer); + + if( pBuffer->nFlags & OMX_BUFFERFLAG_EOS || omxclient->getStatus() == 1 ) + { + omxclient->signalEOF(); + omxclient->setStatus(1); + return OMX_ErrorNone; + } + + if( pBuffer->nSize > 0 ) + { + omxclient->render(pBuffer->pBuffer, pBuffer->nSize); + } + + OMX_FillThisBuffer(hComponent, pBuffer); + omxclient->removeOMXBufferInfo(kPortIndexOutput, pBuffer); + return OMX_ErrorNone; +} + +OMX_ERRORTYPE AnatomyOMXClient::OnEmptyBufferDone( + OMX_IN OMX_HANDLETYPE hComponent, + OMX_IN OMX_PTR pAppData, + OMX_IN OMX_BUFFERHEADERTYPE *pBuffer) { + + AnatomyOMXClient* omxclient = reinterpret_cast(pAppData); + int nSize = 0; + omxclient->addOMXBufferInfo(kPortIndexInput, pBuffer); + CHECK(NULL != omxclient); + CHECK( NULL != pBuffer ); + + if( pBuffer->nFlags & OMX_BUFFERFLAG_EOS ) + { + return OMX_ErrorNone; + } + + omxclient->read(pBuffer->pBuffer, &nSize); + + if( nSize <= 0 ) { + pBuffer->nSize = 0; + pBuffer->nFlags |= OMX_BUFFERFLAG_EOS; + } + else { + pBuffer->nSize = nSize; + } + + OMX_EmptyThisBuffer(hComponent, pBuffer); + omxclient->removeOMXBufferInfo(kPortIndexInput, pBuffer); + return OMX_ErrorNone; +} + +OMX_ERRORTYPE AnatomyOMXClient::OnEvent( + OMX_IN OMX_HANDLETYPE hComponent, + OMX_IN OMX_PTR pAppData, + OMX_IN OMX_EVENTTYPE eEvent, + OMX_IN OMX_U32 nData1, + OMX_IN OMX_U32 nData2, + OMX_IN OMX_PTR pEventData) { + AnatomyOMXClient* omxclient = reinterpret_cast(pAppData); + switch(eEvent) + { + case OMX_EventCmdComplete: + if (OMX_CommandStateSet == nData1) + { + switch (nData2) + { + case 
OMX_StateLoaded: + omxclient->signalLoadedState(); + break; + case OMX_StateIdle: + omxclient->signalIdleState(); + break; + case OMX_StateExecuting: + omxclient->signalExcutingState(); + break; + } + } + break; + case OMX_EventError: + default: + break; + } + return OMX_ErrorNone; +} +} + diff --git a/multimedia/04_day/test-openmax/AnatomyOMXClient.h b/multimedia/04_day/test-openmax/AnatomyOMXClient.h new file mode 100644 index 0000000..b71f644 --- /dev/null +++ b/multimedia/04_day/test-openmax/AnatomyOMXClient.h @@ -0,0 +1,113 @@ +/* +** +** Copyright 2013, kod21236@gmail.com +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +*/ + +#ifndef ANATOMY_OMXCLIENT_H_ +#define ANATOMY_OMXCLIENT_H_ + +#include "ATMParser.h" +#include "StdoutRenderer.h" +#include "OMX_Types.h" +#include "OMX_Core.h" +#include "OMX_Component.h" + +#include +#include + +namespace anatomy { + +struct AnatomyOMXClient { +public: + AnatomyOMXClient(){} + virtual ~AnatomyOMXClient(){} + void prepare(); + void start() { excute(); waitEOF();} + void stop(); + +private: + void waitEOF() { mEOSCondition.wait(mLock); } + void changeState(OMX_STATETYPE state); + void allocateBuffers(); + void freeBuffers(); + void init(); + void deinit(); + void excute(); + void waitIdleState() { mIdleCondition.wait(mLock); } + void waitExcutingState() { mExcutingCondition.wait(mLock);} + void waitLoadedState() { mLoadedCondition.wait(mLock);} + + void render(void* buffer,int length){mRenderer.render(buffer, length);} + void read(void* buffer,int* length){mParser.read(buffer, length);} + + void signalEOF() { mEOSCondition.signal(); } + void signalIdleState() { mIdleCondition.signal(); } + void signalExcutingState() { mExcutingCondition.signal(); } + void signalLoadedState() { mLoadedCondition.signal(); } + + void addOMXBufferInfo(OMX_U32 portIndex, OMX_BUFFERHEADERTYPE* header); + void removeOMXBufferInfo(OMX_U32 portIndex, OMX_BUFFERHEADERTYPE* header); + + void setStatus(int status){ mStatus = status; } + int getStatus(){ return mStatus; } + + static OMX_CALLBACKTYPE kCallbacks; + + android::Condition mEOSCondition; + android::Condition mExcutingCondition; + android::Condition mIdleCondition; + android::Condition mLoadedCondition; + android::Mutex mLock; + ATMParser mParser; + StdoutRenderer mRenderer; + OMX_HANDLETYPE mComponentHandle; + int mStatus; + + enum { + kPortIndexInput = 0, + kPortIndexOutput = 1 + }; + + struct OMXClientBufferInfo { + OMX_U32 mPortIndex; + OMX_BUFFERHEADERTYPE* mBufferHeader; + }; + + android::Vector mBufferInfo; + + static OMX_ERRORTYPE OnEvent( + OMX_IN OMX_HANDLETYPE hComponent, + OMX_IN OMX_PTR pAppData, + OMX_IN OMX_EVENTTYPE eEvent, + OMX_IN OMX_U32 nData1, + OMX_IN OMX_U32 nData2, + OMX_IN OMX_PTR pEventData); + + static OMX_ERRORTYPE OnEmptyBufferDone( + OMX_IN OMX_HANDLETYPE hComponent, + OMX_IN OMX_PTR pAppData, + OMX_IN OMX_BUFFERHEADERTYPE *pBuffer); + + + static OMX_ERRORTYPE OnFillBufferDone( + OMX_IN OMX_HANDLETYPE hComponent, + OMX_IN OMX_PTR pAppData, + OMX_IN OMX_BUFFERHEADERTYPE 
*pBuffer); +}; + +} // namespace anatomy + +#endif // ANATOMY_OMXCLIENT_H_ + diff --git a/multimedia/04_day/test-openmax/AnatomyOMXComponent.cpp b/multimedia/04_day/test-openmax/AnatomyOMXComponent.cpp new file mode 100644 index 0000000..ca637f0 --- /dev/null +++ b/multimedia/04_day/test-openmax/AnatomyOMXComponent.cpp @@ -0,0 +1,161 @@ +/* +** +** Copyright 2013, kod21236@gmail.com +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +*/ + +//#define LOG_NDEBUG 0 +#define LOG_TAG "AnatomyOMXComponent" +#include + +#include "AnatomyOMXComponent.h" + +#include +#include +#include +#include + +#define LOGD ALOGD + +namespace android { + + +template +static void InitOMXParams(T *params) { + params->nSize = sizeof(T); + params->nVersion.s.nVersionMajor = 1; + params->nVersion.s.nVersionMinor = 0; + params->nVersion.s.nRevision = 0; + params->nVersion.s.nStep = 0; +} + +AnatomyOMXComponent::AnatomyOMXComponent( + const char *name, + const OMX_CALLBACKTYPE *callbacks, + OMX_PTR appData, + OMX_COMPONENTTYPE **component) + : SimpleSoftOMXComponent(name, callbacks, appData, component) { + LOGD("anatomy component created"); + initPorts(); +} + +AnatomyOMXComponent::~AnatomyOMXComponent() { + List &outQueue = getPortQueue(kOutputPortIndex); + List &inQueue = getPortQueue(kInputPortIndex); + CHECK(outQueue.empty()); + CHECK(inQueue.empty()); +} + +void AnatomyOMXComponent::initPorts() { + OMX_PARAM_PORTDEFINITIONTYPE def; + InitOMXParams(&def); + + def.nPortIndex = kInputPortIndex; + def.eDir = OMX_DirInput; + def.nBufferCountMin = kNumInputBuffers; + def.nBufferCountActual = def.nBufferCountMin; + def.nBufferSize = 2048; + def.bEnabled = OMX_TRUE; + def.bPopulated = OMX_FALSE; + def.eDomain = OMX_PortDomainOther; + def.bBuffersContiguous = OMX_FALSE; + def.nBufferAlignment = 1;//TODO + + def.format.other.eFormat = OMX_OTHER_FormatVendorStartUnused; //TODO + + addPort(def); + + def.nPortIndex = kOutputPortIndex; + def.eDir = OMX_DirOutput; + def.nBufferCountMin = kNumOutputBuffers; + def.nBufferCountActual = def.nBufferCountMin; + def.nBufferSize = 2048; + def.bEnabled = OMX_TRUE; + def.bPopulated = OMX_FALSE; + def.eDomain = OMX_PortDomainOther; + def.bBuffersContiguous = OMX_FALSE; + def.nBufferAlignment = 1; + + def.format.other.eFormat = OMX_OTHER_FormatVendorStartUnused; //TODO + + addPort(def); +} + +void AnatomyOMXComponent::onQueueFilled(OMX_U32 portIndex) { + List &inQueue = getPortQueue(kInputPortIndex); + List &outQueue = getPortQueue(kOutputPortIndex); + + LOGD("onQueueFilled called"); + while (!inQueue.empty() && !outQueue.empty()) { + BufferInfo *inInfo = *inQueue.begin(); + OMX_BUFFERHEADERTYPE *inHeader = inInfo->mHeader; + + BufferInfo *outInfo = *outQueue.begin(); + OMX_BUFFERHEADERTYPE *outHeader = outInfo->mHeader; + + + if (inHeader->nFlags & OMX_BUFFERFLAG_EOS) { + inQueue.erase(inQueue.begin()); + inInfo->mOwnedByUs = false; + notifyEmptyBufferDone(inHeader); + + outHeader->nFilledLen = 0; + outHeader->nFlags = OMX_BUFFERFLAG_EOS; + + outQueue.erase(outQueue.begin()); + 
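+            // Hand the empty, EOS-flagged output buffer back to the client;
+            // its OnFillBufferDone callback sees OMX_BUFFERFLAG_EOS and signals
+            // end-of-stream instead of refilling the buffer.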
outInfo->mOwnedByUs = false; + notifyFillBufferDone(outHeader); + return; + } + + OMX_U32 decoded_size; + decodeBuffer(outHeader->pBuffer, inHeader->pBuffer, inHeader->nSize, &decoded_size); + usleep(10000); + outHeader->nSize = decoded_size; + + inQueue.erase(inQueue.begin()); + inInfo->mOwnedByUs = false; + inInfo = NULL; + notifyEmptyBufferDone(inHeader); + inHeader = NULL; + + outQueue.erase(outQueue.begin()); + outInfo->mOwnedByUs = false; + outInfo = NULL; + notifyFillBufferDone(outHeader); + outHeader = NULL; + } + +} +void AnatomyOMXComponent::decodeBuffer(OMX_U8* dst, OMX_U8* src, OMX_U32 size, OMX_U32* decoded_size) { + char ascii; + char* ascii_length; + int length = 0; + ascii = src[0]; + length = atoi((char *)&src[1]); + LOGD("decode value %c, %d",ascii,length); + *decoded_size = length; + for(int i = 0; i < length; i++) + { + dst[i] = ascii; + } +} + +} // namespace android +android::SoftOMXComponent *createSoftOMXComponent( + const char *name, const OMX_CALLBACKTYPE *callbacks, + OMX_PTR appData, OMX_COMPONENTTYPE **component) { + return new android::AnatomyOMXComponent(name, callbacks, appData, component); +} + diff --git a/multimedia/04_day/test-openmax/AnatomyOMXComponent.h b/multimedia/04_day/test-openmax/AnatomyOMXComponent.h new file mode 100644 index 0000000..7deb42d --- /dev/null +++ b/multimedia/04_day/test-openmax/AnatomyOMXComponent.h @@ -0,0 +1,54 @@ +/* +** +** Copyright 2013, kod21236@gmail.com +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +*/ + +#ifndef ANATOMY_OMX_COMPONENT_H_ + +#define ANATOMY_OMX_COMPONENT_H_ + +#include +#include + +namespace android { + +struct AnatomyOMXComponent : public SimpleSoftOMXComponent { + AnatomyOMXComponent(const char *name, + const OMX_CALLBACKTYPE *callbacks, + OMX_PTR appData, + OMX_COMPONENTTYPE **component); + +protected: + virtual ~AnatomyOMXComponent(); + virtual void onQueueFilled(OMX_U32 portIndex); + + +private: + enum { + kInputPortIndex = 0, + kOutputPortIndex = 1, + kNumInputBuffers = 2, + kNumOutputBuffers = 5, + }; + void initPorts(); + void decodeBuffer(OMX_U8* dst, OMX_U8* src, OMX_U32 size, OMX_U32* decoded_size); + + DISALLOW_EVIL_CONSTRUCTORS(AnatomyOMXComponent); +}; + +} // namespace android + +#endif // ANATOMY_OMX_COMPONENT_H_ + diff --git a/multimedia/04_day/test-openmax/AnatomyOMXCore.cpp b/multimedia/04_day/test-openmax/AnatomyOMXCore.cpp new file mode 100644 index 0000000..7aa4ce1 --- /dev/null +++ b/multimedia/04_day/test-openmax/AnatomyOMXCore.cpp @@ -0,0 +1,159 @@ +/* +** +** Copyright 2013, kod21236@gmail.com +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+** See the License for the specific language governing permissions and +** limitations under the License. +*/ + +#include "AnatomyOMXCore.h" + +#include + +#include + +//#include +//#include + +#include + +#define LOGD ALOGD +#define LOGV ALOGV + +using namespace android; + +static const struct { + const char *mName; + const char *mLibName; + const char *mRole; + +} kComponentInfo[] = { + {"OMX.anatomy.atm.decoder", "libanatomy_atmdec.so", "ascii_decoder.atm" }, +}; + + +OMX_API OMX_ERRORTYPE OMX_APIENTRY +OMX_Init() +{ + return OMX_ErrorNone; +} + +OMX_API OMX_ERRORTYPE OMX_APIENTRY +OMX_Deinit() +{ + return OMX_ErrorNone; +} + +OMX_API OMX_ERRORTYPE OMX_APIENTRY +OMX_GetHandle(OMX_OUT OMX_HANDLETYPE* handle, + OMX_IN OMX_STRING componentName, + OMX_IN OMX_PTR appData, + OMX_IN OMX_CALLBACKTYPE* callBacks) +{ + if (strcmp(componentName, kComponentInfo[0].mName) == 0) { + AString libName; + libName.append(kComponentInfo[0].mLibName); + + + void *libHandle = dlopen(libName.c_str(), RTLD_NOW); + + + typedef SoftOMXComponent *(*CreateSoftOMXComponentFunc)( + const char *, const OMX_CALLBACKTYPE *, + OMX_PTR, OMX_COMPONENTTYPE **); + + CreateSoftOMXComponentFunc createSoftOMXComponent = + (CreateSoftOMXComponentFunc)dlsym( + libHandle, + "_Z22createSoftOMXComponentPKcPK16OMX_CALLBACKTYPE" + "PvPP17OMX_COMPONENTTYPE"); + + sp codec = + (*createSoftOMXComponent)(componentName, callBacks, appData, (OMX_COMPONENTTYPE **)handle); + + + codec->initCheck(); + codec->incStrong(handle); + codec->setLibHandle(libHandle); + + return OMX_ErrorNone; + } + + return OMX_ErrorInvalidComponentName; +} + +OMX_API OMX_ERRORTYPE OMX_APIENTRY +OMX_FreeHandle(OMX_IN OMX_HANDLETYPE hComp) +{ + SoftOMXComponent *me = + (SoftOMXComponent *) + ((OMX_COMPONENTTYPE *)hComp)->pComponentPrivate; + + me->prepareForDestruction(); + + void *libHandle = me->libHandle(); + + me = NULL; + + dlclose(libHandle); + libHandle = NULL; + + return OMX_ErrorNone; +} + +OMX_API OMX_ERRORTYPE OMX_APIENTRY +OMX_ComponentNameEnum(OMX_OUT OMX_STRING componentName, + OMX_IN OMX_U32 nameLen, + OMX_IN OMX_U32 index) +{ + if (index >= 1 ) { + return OMX_ErrorNoMore; + } + + strncpy(componentName, kComponentInfo[0].mName, nameLen); + + return OMX_ErrorNone; +} + +OMX_API OMX_ERRORTYPE OMX_APIENTRY +OMX_SetupTunnel(OMX_IN OMX_HANDLETYPE outputComponent, + OMX_IN OMX_U32 outputPort, + OMX_IN OMX_HANDLETYPE inputComponent, + OMX_IN OMX_U32 inputPort) +{ + return OMX_ErrorNotImplemented; +} + +OMX_API OMX_ERRORTYPE +OMX_GetContentPipe(OMX_OUT OMX_HANDLETYPE* pipe, + OMX_IN OMX_STRING uri) +{ + return OMX_ErrorNotImplemented; +} + + + +OMX_API OMX_ERRORTYPE +OMX_GetComponentsOfRole(OMX_IN OMX_STRING role, + OMX_INOUT OMX_U32* numComps, + OMX_INOUT OMX_U8** compNames) +{ + return OMX_ErrorNotImplemented; +} + +OMX_API OMX_ERRORTYPE +OMX_GetRolesOfComponent(OMX_IN OMX_STRING compName, + OMX_INOUT OMX_U32* numRoles, + OMX_OUT OMX_U8** roles) +{ + return OMX_ErrorNotImplemented; +} diff --git a/multimedia/04_day/test-openmax/AnatomyOMXCore.h b/multimedia/04_day/test-openmax/AnatomyOMXCore.h new file mode 100644 index 0000000..6c62f30 --- /dev/null +++ b/multimedia/04_day/test-openmax/AnatomyOMXCore.h @@ -0,0 +1,65 @@ +/* +** +** Copyright 2013, kod21236@gmail.com +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. 
+** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +*/ + +#ifndef ANATOMYOMXCORE_H_ +#define ANATOMYOMXCORE_H_ + +#include "OMX_Types.h" +#include "OMX_Core.h" +#include "OMX_Component.h" + +OMX_API OMX_ERRORTYPE OMX_APIENTRY +OMX_Init(); + +OMX_API OMX_ERRORTYPE OMX_APIENTRY +OMX_Deinit(); + +OMX_API OMX_ERRORTYPE OMX_APIENTRY +OMX_GetHandle(OMX_OUT OMX_HANDLETYPE* handle, + OMX_IN OMX_STRING componentName, + OMX_IN OMX_PTR appData, + OMX_IN OMX_CALLBACKTYPE* callBacks); + +OMX_API OMX_ERRORTYPE OMX_APIENTRY +OMX_FreeHandle(OMX_IN OMX_HANDLETYPE hComp); + +OMX_API OMX_ERRORTYPE OMX_APIENTRY +OMX_SetupTunnel(OMX_IN OMX_HANDLETYPE outputComponent, + OMX_IN OMX_U32 outputPort, + OMX_IN OMX_HANDLETYPE inputComponent, + OMX_IN OMX_U32 inputPort); + +OMX_API OMX_ERRORTYPE +OMX_GetContentPipe(OMX_OUT OMX_HANDLETYPE* pipe, + OMX_IN OMX_STRING uri); + +OMX_API OMX_ERRORTYPE OMX_APIENTRY +OMX_ComponentNameEnum(OMX_OUT OMX_STRING componentName, + OMX_IN OMX_U32 nameLen, + OMX_IN OMX_U32 index); + +OMX_API OMX_ERRORTYPE +OMX_GetComponentsOfRole(OMX_IN OMX_STRING role, + OMX_INOUT OMX_U32* numComps, + OMX_INOUT OMX_U8** compNames); + +OMX_API OMX_ERRORTYPE +OMX_GetRolesOfComponent(OMX_IN OMX_STRING compName, + OMX_INOUT OMX_U32* numRoles, + OMX_OUT OMX_U8** roles); + +#endif /* ANATOMYOMXCORE_H_ */ diff --git a/multimedia/04_day/test-openmax/AnatomyOMXPlugin.cpp b/multimedia/04_day/test-openmax/AnatomyOMXPlugin.cpp new file mode 100644 index 0000000..e4b586b --- /dev/null +++ b/multimedia/04_day/test-openmax/AnatomyOMXPlugin.cpp @@ -0,0 +1,170 @@ +/* +** +** Copyright 2013, kod21236@gmail.com +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. 
+*/ + +//#define LOG_NDEBUG 0 +#define LOG_TAG "AnatomyOMXPlugin" +#include + +#include "AnatomyOMXPlugin.h" +#include + +#include +#include + +#include + +#define LOGV ALOGV +#define LOGE ALOGE + + +namespace android { + +static const struct { + const char *mName; + const char *mLibNameSuffix; + const char *mRole; + +} kComponents[] = { + { "OMX.anatomy.atm.decoder", "atmdec", "ascii_decoder.atm" }, +}; + +static const size_t kNumComponents = + sizeof(kComponents) / sizeof(kComponents[0]); + +AnatomyOMXPlugin::AnatomyOMXPlugin() { +} + +OMX_ERRORTYPE AnatomyOMXPlugin::makeComponentInstance( + const char *name, + const OMX_CALLBACKTYPE *callbacks, + OMX_PTR appData, + OMX_COMPONENTTYPE **component) { + LOGV("makeComponentInstance '%s'", name); + + for (size_t i = 0; i < kNumComponents; ++i) { + if (strcmp(name, kComponents[i].mName)) { + continue; + } + + AString libName = "libanatomy_"; + libName.append(kComponents[i].mLibNameSuffix); + libName.append(".so"); + + void *libHandle = dlopen(libName.c_str(), RTLD_NOW); + + if (libHandle == NULL) { + LOGE("unable to dlopen %s", libName.c_str()); + + return OMX_ErrorComponentNotFound; + } + + typedef SoftOMXComponent *(*CreateSoftOMXComponentFunc)( + const char *, const OMX_CALLBACKTYPE *, + OMX_PTR, OMX_COMPONENTTYPE **); + + CreateSoftOMXComponentFunc createSoftOMXComponent = + (CreateSoftOMXComponentFunc)dlsym( + libHandle, + "_Z22createSoftOMXComponentPKcPK16OMX_CALLBACKTYPE" + "PvPP17OMX_COMPONENTTYPE"); + + if (createSoftOMXComponent == NULL) { + dlclose(libHandle); + libHandle = NULL; + + return OMX_ErrorComponentNotFound; + } + + sp codec = + (*createSoftOMXComponent)(name, callbacks, appData, component); + + if (codec == NULL) { + dlclose(libHandle); + libHandle = NULL; + + return OMX_ErrorInsufficientResources; + } + + OMX_ERRORTYPE err = codec->initCheck(); + if (err != OMX_ErrorNone) { + dlclose(libHandle); + libHandle = NULL; + + return err; + } + + codec->incStrong(this); + codec->setLibHandle(libHandle); + + return OMX_ErrorNone; + } + + return OMX_ErrorInvalidComponentName; +} + +OMX_ERRORTYPE AnatomyOMXPlugin::destroyComponentInstance( + OMX_COMPONENTTYPE *component) { + SoftOMXComponent *me = + (SoftOMXComponent *) + ((OMX_COMPONENTTYPE *)component)->pComponentPrivate; + + me->prepareForDestruction(); + + void *libHandle = me->libHandle(); + + CHECK_EQ(me->getStrongCount(), 1); + me->decStrong(this); + me = NULL; + + dlclose(libHandle); + libHandle = NULL; + + return OMX_ErrorNone; +} + +OMX_ERRORTYPE AnatomyOMXPlugin::enumerateComponents( + OMX_STRING name, + size_t size, + OMX_U32 index) { + if (index >= kNumComponents) { + return OMX_ErrorNoMore; + } + + strcpy(name, kComponents[index].mName); + + return OMX_ErrorNone; +} + +OMX_ERRORTYPE AnatomyOMXPlugin::getRolesOfComponent( + const char *name, + Vector *roles) { + for (size_t i = 0; i < kNumComponents; ++i) { + if (strcmp(name, kComponents[i].mName)) { + continue; + } + + roles->clear(); + roles->push(String8(kComponents[i].mRole)); + + return OMX_ErrorNone; + } + + return OMX_ErrorInvalidComponentName; +} + +} // namespace android + diff --git a/multimedia/04_day/test-openmax/AnatomyOMXPlugin.h b/multimedia/04_day/test-openmax/AnatomyOMXPlugin.h new file mode 100644 index 0000000..1393734 --- /dev/null +++ b/multimedia/04_day/test-openmax/AnatomyOMXPlugin.h @@ -0,0 +1,55 @@ +/* +** +** Copyright 2013, kod21236@gmail.com +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. 
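The plugin above mirrors Stagefright's SoftOMXPlugin: the string handed to dlsym() in both loaders is simply the mangled form of createSoftOMXComponent(char const*, OMX_CALLBACKTYPE const*, void*, OMX_COMPONENTTYPE**), which AnatomyOMXComponent.cpp (not shown in this hunk) is expected to export. Below is a short sketch of driving the plugin interface directly; it is illustrative only, assumes the roles vector holds String8 entries (the template arguments were stripped from this diff), and leaves out makeComponentInstance(), which would additionally need OMX callbacks and libanatomy_atmdec.so on the target.

#include <stdio.h>
#include <utils/String8.h>
#include <utils/Vector.h>
#include "AnatomyOMXPlugin.h"

using namespace android;

int main() {
    AnatomyOMXPlugin plugin;
    char name[128];   // large enough for OMX component names

    for (OMX_U32 i = 0; ; ++i) {
        if (plugin.enumerateComponents(name, sizeof(name), i) != OMX_ErrorNone) {
            break;   // OMX_ErrorNoMore once the single ATM decoder is listed
        }

        Vector<String8> roles;
        if (plugin.getRolesOfComponent(name, &roles) == OMX_ErrorNone) {
            for (size_t r = 0; r < roles.size(); ++r) {
                printf("%s implements role %s\n", name, roles[r].string());
            }
        }
    }
    return 0;
}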
+** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +*/ + +#ifndef ANATOMY_OMX_PLUGIN_H_ + +#define ANATOMY_OMX_PLUGIN_H_ + +#include +#include + +namespace android { + +struct AnatomyOMXPlugin : public OMXPluginBase { + AnatomyOMXPlugin(); + + virtual OMX_ERRORTYPE makeComponentInstance( + const char *name, + const OMX_CALLBACKTYPE *callbacks, + OMX_PTR appData, + OMX_COMPONENTTYPE **component); + + virtual OMX_ERRORTYPE destroyComponentInstance( + OMX_COMPONENTTYPE *component); + + virtual OMX_ERRORTYPE enumerateComponents( + OMX_STRING name, + size_t size, + OMX_U32 index); + + virtual OMX_ERRORTYPE getRolesOfComponent( + const char *name, + Vector *roles); + +private: + DISALLOW_EVIL_CONSTRUCTORS(AnatomyOMXPlugin); +}; + +} // namespace android + +#endif // ANATOMY_OMX_PLUGIN_H_ + diff --git a/multimedia/04_day/test-openmax/Android.mk b/multimedia/04_day/test-openmax/Android.mk new file mode 100644 index 0000000..0628118 --- /dev/null +++ b/multimedia/04_day/test-openmax/Android.mk @@ -0,0 +1,59 @@ +LOCAL_PATH := $(call my-dir) + +include $(CLEAR_VARS) + +LOCAL_MODULE := test-openmax +LOCAL_MODULE_TAGS := tests + +LOCAL_C_INCLUDES := \ + $(LOCAL_PATH)/include \ + bionic/ \ + external/stlport/stlport \ + frameworks/base/include \ + frameworks/av/media/libstagefright \ + frameworks/native/include/media/openmax \ + + + +LOCAL_SRC_FILES := \ + AnatomyOMXCore.cpp \ + AnatomyOMXPlugin.cpp \ + main_anatomy_omxclient.cpp \ + AnatomyOMXClient.cpp \ + ATMParser.cpp \ + StdoutRenderer.cpp \ + +LOCAL_SHARED_LIBRARIES := \ + libcutils\ + libutils\ + libdl\ + libstlport \ + libstagefright_omx \ + libstagefright_foundation \ + +include $(BUILD_EXECUTABLE) + +include $(CLEAR_VARS) +LOCAL_MODULE := libanatomy_atmdec +LOCAL_MODULE_TAGS := tests + +LOCAL_C_INCLUDES := \ + $(LOCAL_PATH)/include \ + bionic/ \ + external/stlport/stlport \ + frameworks/base/include \ + frameworks/av/media/libstagefright \ + frameworks/native/include/media/openmax \ + +LOCAL_SHARED_LIBRARIES := \ + libcutils\ + libutils\ + libdl\ + libstlport \ + libstagefright_omx \ + libstagefright_foundation \ + +LOCAL_SRC_FILES := \ + AnatomyOMXComponent.cpp \ + +include $(BUILD_SHARED_LIBRARY) diff --git a/multimedia/04_day/test-openmax/StdoutRenderer.cpp b/multimedia/04_day/test-openmax/StdoutRenderer.cpp new file mode 100644 index 0000000..50b9f1c --- /dev/null +++ b/multimedia/04_day/test-openmax/StdoutRenderer.cpp @@ -0,0 +1,41 @@ +/* +** +** Copyright 2013, kod21236@gmail.com +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. 
+*/ + +//#define LOG_NDEBUG 0 +#define LOG_TAG "StdoutRenderer" + +#include "StdoutRenderer.h" +#include + +namespace anatomy { + +void StdoutRenderer::render(void* buffer,int length) { + memcpy(mBuffer, buffer, length); + mBuffer[length] = '\0'; + if( mBuffer[0] == '/' ) + { + std::cout << std::endl; + } + else + { + std::cout << mBuffer; + } + +} +} + + diff --git a/multimedia/04_day/test-openmax/StdoutRenderer.h b/multimedia/04_day/test-openmax/StdoutRenderer.h new file mode 100644 index 0000000..6d6751a --- /dev/null +++ b/multimedia/04_day/test-openmax/StdoutRenderer.h @@ -0,0 +1,36 @@ +/* +** +** Copyright 2013, kod21236@gmail.com +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +*/ + +#ifndef STDOUT_RENDERER_H_ +#define STDOUT_RENDERER_H_ + + +namespace anatomy { + +class StdoutRenderer { +public: + StdoutRenderer(){} + virtual ~StdoutRenderer(){} + void render(void* buffer,int length); +private: + char mBuffer[256]; +}; + +} // namespace anatomy + +#endif // STDOUT_RENDERER_H_ + diff --git a/multimedia/04_day/test-openmax/main_anatomy_omxclient.cpp b/multimedia/04_day/test-openmax/main_anatomy_omxclient.cpp new file mode 100644 index 0000000..82dc3ec --- /dev/null +++ b/multimedia/04_day/test-openmax/main_anatomy_omxclient.cpp @@ -0,0 +1,41 @@ +/* +** +** Copyright 2013, kod21236@gmail.com +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. 
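Two details of the renderer above are worth noting: render() writes a terminating NUL at mBuffer[length], so with the fixed 256-byte member buffer callers must keep length at 255 or below, and a chunk whose first byte is '/' is emitted as a bare line break instead of being printed. A tiny standalone driver, for illustration only and not part of the original commit:

#include <string.h>
#include "StdoutRenderer.h"

int main() {
    anatomy::StdoutRenderer renderer;

    char text[] = "Hello, ATM decoder";
    renderer.render(text, (int)strlen(text));   // printed verbatim

    char lineBreak[] = "/";
    renderer.render(lineBreak, 1);              // first byte '/' -> newline only
    return 0;
}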
+*/ + +#include "AnatomyOMXClient.h" + +#include +#include + +using namespace anatomy; + +int main() +{ + AnatomyOMXClient* pOMXClient = new AnatomyOMXClient; + + CHECK( NULL != pOMXClient ); + + pOMXClient->prepare(); + + pOMXClient->start(); + + pOMXClient->stop(); + + delete pOMXClient; + return 0; +} + + diff --git a/multimedia/04_day/test-openmax/tags b/multimedia/04_day/test-openmax/tags new file mode 100644 index 0000000..c1dfdc2 --- /dev/null +++ b/multimedia/04_day/test-openmax/tags @@ -0,0 +1,138 @@ +!_TAG_FILE_FORMAT 2 /extended format; --format=1 will not append ;" to lines/ +!_TAG_FILE_SORTED 1 /0=unsorted, 1=sorted, 2=foldcase/ +!_TAG_PROGRAM_AUTHOR Darren Hiebert /dhiebert@users.sourceforge.net/ +!_TAG_PROGRAM_NAME Exuberant Ctags // +!_TAG_PROGRAM_URL http://ctags.sourceforge.net /official site/ +!_TAG_PROGRAM_VERSION 5.9~svn20110310 // +ANATOMYOMXCORE_H_ AnatomyOMXCore.h 19;" d +ANATOMY_OMXCLIENT_H_ AnatomyOMXClient.h 19;" d +ANATOMY_OMX_COMPONENT_H_ AnatomyOMXComponent.h 20;" d +ANATOMY_OMX_PLUGIN_H_ AnatomyOMXPlugin.h 20;" d +ATMParser ATMParser.cpp /^ATMParser::ATMParser()$/;" f class:anatomy::ATMParser +ATMParser ATMParser.h /^class ATMParser {$/;" c namespace:anatomy +ATM_PARSER_H_ ATMParser.h 18;" d +AnatomyOMXClient AnatomyOMXClient.h /^ AnatomyOMXClient(){}$/;" f struct:anatomy::AnatomyOMXClient +AnatomyOMXClient AnatomyOMXClient.h /^struct AnatomyOMXClient {$/;" s namespace:anatomy +AnatomyOMXComponent AnatomyOMXComponent.cpp /^AnatomyOMXComponent::AnatomyOMXComponent($/;" f class:android::AnatomyOMXComponent +AnatomyOMXComponent AnatomyOMXComponent.h /^struct AnatomyOMXComponent : public SimpleSoftOMXComponent {$/;" s namespace:android +AnatomyOMXPlugin AnatomyOMXPlugin.cpp /^AnatomyOMXPlugin::AnatomyOMXPlugin() {$/;" f class:android::AnatomyOMXPlugin +AnatomyOMXPlugin AnatomyOMXPlugin.h /^struct AnatomyOMXPlugin : public OMXPluginBase {$/;" s namespace:android +InitOMXParams AnatomyOMXClient.cpp /^static void InitOMXParams(T *params) {$/;" f namespace:anatomy +InitOMXParams AnatomyOMXComponent.cpp /^static void InitOMXParams(T *params) {$/;" f namespace:android +LOCAL_C_INCLUDES Android.mk /^LOCAL_C_INCLUDES := \\$/;" m +LOCAL_MODULE Android.mk /^LOCAL_MODULE := anatomy_omx_client$/;" m +LOCAL_MODULE Android.mk /^LOCAL_MODULE := libanatomy_atmdec$/;" m +LOCAL_MODULE_TAGS Android.mk /^LOCAL_MODULE_TAGS := tests$/;" m +LOCAL_PATH Android.mk /^LOCAL_PATH := $(call my-dir)$/;" m +LOCAL_SHARED_LIBRARIES Android.mk /^LOCAL_SHARED_LIBRARIES := \\$/;" m +LOCAL_SRC_FILES Android.mk /^LOCAL_SRC_FILES := \\$/;" m +LOGD ATMParser.cpp 25;" d file: +LOGD AnatomyOMXClient.cpp 28;" d file: +LOGD AnatomyOMXComponent.cpp 29;" d file: +LOGD AnatomyOMXCore.cpp 29;" d file: +LOGE AnatomyOMXPlugin.cpp 31;" d file: +LOGV AnatomyOMXCore.cpp 30;" d file: +LOGV AnatomyOMXPlugin.cpp 30;" d file: +LOGW AnatomyOMXClient.cpp 27;" d file: +LOG_TAG ATMParser.cpp 19;" d file: +LOG_TAG AnatomyOMXClient.cpp 19;" d file: +LOG_TAG AnatomyOMXComponent.cpp 19;" d file: +LOG_TAG AnatomyOMXPlugin.cpp 19;" d file: +LOG_TAG StdoutRenderer.cpp 19;" d file: +OMXClientBufferInfo AnatomyOMXClient.h /^ struct OMXClientBufferInfo {$/;" s struct:anatomy::AnatomyOMXClient +OMX_ComponentNameEnum AnatomyOMXCore.cpp /^OMX_ComponentNameEnum(OMX_OUT OMX_STRING componentName,$/;" f +OMX_Deinit AnatomyOMXCore.cpp /^OMX_Deinit()$/;" f +OMX_FreeHandle AnatomyOMXCore.cpp /^OMX_FreeHandle(OMX_IN OMX_HANDLETYPE hComp)$/;" f +OMX_GetComponentsOfRole AnatomyOMXCore.cpp /^OMX_GetComponentsOfRole(OMX_IN OMX_STRING role,$/;" f 
+OMX_GetContentPipe AnatomyOMXCore.cpp /^OMX_GetContentPipe(OMX_OUT OMX_HANDLETYPE* pipe,$/;" f +OMX_GetHandle AnatomyOMXCore.cpp /^OMX_GetHandle(OMX_OUT OMX_HANDLETYPE* handle,$/;" f +OMX_GetRolesOfComponent AnatomyOMXCore.cpp /^OMX_GetRolesOfComponent(OMX_IN OMX_STRING compName,$/;" f +OMX_Init AnatomyOMXCore.cpp /^OMX_Init()$/;" f +OMX_SetupTunnel AnatomyOMXCore.cpp /^OMX_SetupTunnel(OMX_IN OMX_HANDLETYPE outputComponent,$/;" f +OnEmptyBufferDone AnatomyOMXClient.cpp /^OMX_ERRORTYPE AnatomyOMXClient::OnEmptyBufferDone($/;" f class:anatomy::AnatomyOMXClient +OnEvent AnatomyOMXClient.cpp /^OMX_ERRORTYPE AnatomyOMXClient::OnEvent($/;" f class:anatomy::AnatomyOMXClient +OnFillBufferDone AnatomyOMXClient.cpp /^OMX_ERRORTYPE AnatomyOMXClient::OnFillBufferDone($/;" f class:anatomy::AnatomyOMXClient +STDOUT_RENDERER_H_ StdoutRenderer.h 19;" d +StdoutRenderer StdoutRenderer.h /^ StdoutRenderer(){}$/;" f class:anatomy::StdoutRenderer +StdoutRenderer StdoutRenderer.h /^class StdoutRenderer {$/;" c namespace:anatomy +addOMXBufferInfo AnatomyOMXClient.cpp /^void AnatomyOMXClient::addOMXBufferInfo($/;" f class:anatomy::AnatomyOMXClient +allocateBuffers AnatomyOMXClient.cpp /^void AnatomyOMXClient::allocateBuffers() {$/;" f class:anatomy::AnatomyOMXClient +anatomy ATMParser.cpp /^namespace anatomy {$/;" n file: +anatomy ATMParser.h /^namespace anatomy {$/;" n +anatomy AnatomyOMXClient.cpp /^namespace anatomy {$/;" n file: +anatomy AnatomyOMXClient.h /^namespace anatomy {$/;" n +anatomy StdoutRenderer.cpp /^namespace anatomy {$/;" n file: +anatomy StdoutRenderer.h /^namespace anatomy {$/;" n +android AnatomyOMXComponent.cpp /^namespace android {$/;" n file: +android AnatomyOMXComponent.h /^namespace android {$/;" n +android AnatomyOMXPlugin.cpp /^namespace android {$/;" n file: +android AnatomyOMXPlugin.h /^namespace android {$/;" n +changeState AnatomyOMXClient.cpp /^void AnatomyOMXClient::changeState(OMX_STATETYPE state) {$/;" f class:anatomy::AnatomyOMXClient +createSoftOMXComponent AnatomyOMXComponent.cpp /^android::SoftOMXComponent *createSoftOMXComponent($/;" f +decodeBuffer AnatomyOMXComponent.cpp /^void AnatomyOMXComponent::decodeBuffer(OMX_U8* dst, OMX_U8* src, OMX_U32 size, OMX_U32* decoded_size) {$/;" f class:android::AnatomyOMXComponent +deinit AnatomyOMXClient.cpp /^void AnatomyOMXClient::deinit() {$/;" f class:anatomy::AnatomyOMXClient +destroyComponentInstance AnatomyOMXPlugin.cpp /^OMX_ERRORTYPE AnatomyOMXPlugin::destroyComponentInstance($/;" f class:android::AnatomyOMXPlugin +enumerateComponents AnatomyOMXPlugin.cpp /^OMX_ERRORTYPE AnatomyOMXPlugin::enumerateComponents($/;" f class:android::AnatomyOMXPlugin +excute AnatomyOMXClient.cpp /^void AnatomyOMXClient::excute() {$/;" f class:anatomy::AnatomyOMXClient +freeBuffers AnatomyOMXClient.cpp /^void AnatomyOMXClient::freeBuffers() {$/;" f class:anatomy::AnatomyOMXClient +getRolesOfComponent AnatomyOMXPlugin.cpp /^OMX_ERRORTYPE AnatomyOMXPlugin::getRolesOfComponent($/;" f class:android::AnatomyOMXPlugin +getStatus AnatomyOMXClient.h /^ int getStatus(){ return mStatus; }$/;" f struct:anatomy::AnatomyOMXClient +init AnatomyOMXClient.cpp /^void AnatomyOMXClient::init() {$/;" f class:anatomy::AnatomyOMXClient +initPorts AnatomyOMXComponent.cpp /^void AnatomyOMXComponent::initPorts() {$/;" f class:android::AnatomyOMXComponent +kCallbacks AnatomyOMXClient.cpp /^OMX_CALLBACKTYPE AnatomyOMXClient::kCallbacks = {$/;" m class:anatomy::AnatomyOMXClient file: +kCallbacks AnatomyOMXClient.h /^ static OMX_CALLBACKTYPE kCallbacks;$/;" m 
struct:anatomy::AnatomyOMXClient +kComponentInfo AnatomyOMXCore.cpp /^} kComponentInfo[] = {$/;" v typeref:struct:__anon4 file: +kComponents AnatomyOMXPlugin.cpp /^} kComponents[] = {$/;" m namespace:android typeref:struct:android::__anon3 file: +kInputPortIndex AnatomyOMXComponent.h /^ kInputPortIndex = 0,$/;" e enum:android::AnatomyOMXComponent::__anon2 +kNumComponents AnatomyOMXPlugin.cpp /^static const size_t kNumComponents =$/;" m namespace:android file: +kNumInputBuffers AnatomyOMXComponent.h /^ kNumInputBuffers = 2,$/;" e enum:android::AnatomyOMXComponent::__anon2 +kNumOutputBuffers AnatomyOMXComponent.h /^ kNumOutputBuffers = 5,$/;" e enum:android::AnatomyOMXComponent::__anon2 +kOutputPortIndex AnatomyOMXComponent.h /^ kOutputPortIndex = 1,$/;" e enum:android::AnatomyOMXComponent::__anon2 +kPortIndexInput AnatomyOMXClient.h /^ kPortIndexInput = 0,$/;" e enum:anatomy::AnatomyOMXClient::__anon1 +kPortIndexOutput AnatomyOMXClient.h /^ kPortIndexOutput = 1$/;" e enum:anatomy::AnatomyOMXClient::__anon1 +mBuffer StdoutRenderer.h /^ char mBuffer[256];$/;" m class:anatomy::StdoutRenderer +mBufferHeader AnatomyOMXClient.h /^ OMX_BUFFERHEADERTYPE* mBufferHeader;$/;" m struct:anatomy::AnatomyOMXClient::OMXClientBufferInfo +mBufferInfo AnatomyOMXClient.h /^ android::Vector mBufferInfo;$/;" m struct:anatomy::AnatomyOMXClient +mComponentHandle AnatomyOMXClient.h /^ OMX_HANDLETYPE mComponentHandle;$/;" m struct:anatomy::AnatomyOMXClient +mEOSCondition AnatomyOMXClient.h /^ android::Condition mEOSCondition;$/;" m struct:anatomy::AnatomyOMXClient +mExcutingCondition AnatomyOMXClient.h /^ android::Condition mExcutingCondition;$/;" m struct:anatomy::AnatomyOMXClient +mIdleCondition AnatomyOMXClient.h /^ android::Condition mIdleCondition;$/;" m struct:anatomy::AnatomyOMXClient +mLength ATMParser.h /^ int mLength;$/;" m class:anatomy::ATMParser +mLibName AnatomyOMXCore.cpp /^ const char *mLibName;$/;" m struct:__anon4 file: +mLibNameSuffix AnatomyOMXPlugin.cpp /^ const char *mLibNameSuffix;$/;" m struct:android::__anon3 file: +mLoadedCondition AnatomyOMXClient.h /^ android::Condition mLoadedCondition;$/;" m struct:anatomy::AnatomyOMXClient +mLock AnatomyOMXClient.h /^ android::Mutex mLock;$/;" m struct:anatomy::AnatomyOMXClient +mName AnatomyOMXCore.cpp /^ const char *mName;$/;" m struct:__anon4 file: +mName AnatomyOMXPlugin.cpp /^ const char *mName;$/;" m struct:android::__anon3 file: +mOffset ATMParser.h /^ int mOffset;$/;" m class:anatomy::ATMParser +mParser AnatomyOMXClient.h /^ ATMParser mParser;$/;" m struct:anatomy::AnatomyOMXClient +mPortIndex AnatomyOMXClient.h /^ OMX_U32 mPortIndex;$/;" m struct:anatomy::AnatomyOMXClient::OMXClientBufferInfo +mRenderer AnatomyOMXClient.h /^ StdoutRenderer mRenderer;$/;" m struct:anatomy::AnatomyOMXClient +mRole AnatomyOMXCore.cpp /^ const char *mRole;$/;" m struct:__anon4 file: +mRole AnatomyOMXPlugin.cpp /^ const char *mRole;$/;" m struct:android::__anon3 file: +mStatus AnatomyOMXClient.h /^ int mStatus;$/;" m struct:anatomy::AnatomyOMXClient +main main_anatomy_omxclient.cpp /^int main()$/;" f +makeComponentInstance AnatomyOMXPlugin.cpp /^OMX_ERRORTYPE AnatomyOMXPlugin::makeComponentInstance($/;" f class:android::AnatomyOMXPlugin +onQueueFilled AnatomyOMXComponent.cpp /^void AnatomyOMXComponent::onQueueFilled(OMX_U32 portIndex) {$/;" f class:android::AnatomyOMXComponent +prepare AnatomyOMXClient.cpp /^void AnatomyOMXClient::prepare() {$/;" f class:anatomy::AnatomyOMXClient +pszSource ATMParser.cpp /^static char pszSource[] = "\\$/;" m namespace:anatomy file: 
+read ATMParser.cpp /^void ATMParser::read(void* buffer, int* length) {$/;" f class:anatomy::ATMParser +read AnatomyOMXClient.h /^ void read(void* buffer,int* length){mParser.read(buffer, length);}$/;" f struct:anatomy::AnatomyOMXClient +removeOMXBufferInfo AnatomyOMXClient.cpp /^void AnatomyOMXClient::removeOMXBufferInfo($/;" f class:anatomy::AnatomyOMXClient +render AnatomyOMXClient.h /^ void render(void* buffer,int length){mRenderer.render(buffer, length);}$/;" f struct:anatomy::AnatomyOMXClient +render StdoutRenderer.cpp /^void StdoutRenderer::render(void* buffer,int length) {$/;" f class:anatomy::StdoutRenderer +setStatus AnatomyOMXClient.h /^ void setStatus(int status){ mStatus = status; }$/;" f struct:anatomy::AnatomyOMXClient +signalEOF AnatomyOMXClient.h /^ void signalEOF() { mEOSCondition.signal(); }$/;" f struct:anatomy::AnatomyOMXClient +signalExcutingState AnatomyOMXClient.h /^ void signalExcutingState() { mExcutingCondition.signal(); }$/;" f struct:anatomy::AnatomyOMXClient +signalIdleState AnatomyOMXClient.h /^ void signalIdleState() { mIdleCondition.signal(); }$/;" f struct:anatomy::AnatomyOMXClient +signalLoadedState AnatomyOMXClient.h /^ void signalLoadedState() { mLoadedCondition.signal(); }$/;" f struct:anatomy::AnatomyOMXClient +start AnatomyOMXClient.h /^ void start() { excute(); waitEOF();}$/;" f struct:anatomy::AnatomyOMXClient +stop AnatomyOMXClient.cpp /^void AnatomyOMXClient::stop() {$/;" f class:anatomy::AnatomyOMXClient +waitEOF AnatomyOMXClient.h /^ void waitEOF() { mEOSCondition.wait(mLock); }$/;" f struct:anatomy::AnatomyOMXClient +waitExcutingState AnatomyOMXClient.h /^ void waitExcutingState() { mExcutingCondition.wait(mLock);}$/;" f struct:anatomy::AnatomyOMXClient +waitIdleState AnatomyOMXClient.h /^ void waitIdleState() { mIdleCondition.wait(mLock); }$/;" f struct:anatomy::AnatomyOMXClient +waitLoadedState AnatomyOMXClient.h /^ void waitLoadedState() { mLoadedCondition.wait(mLock);}$/;" f struct:anatomy::AnatomyOMXClient +~ATMParser ATMParser.h /^ virtual ~ATMParser(){}$/;" f class:anatomy::ATMParser +~AnatomyOMXClient AnatomyOMXClient.h /^ virtual ~AnatomyOMXClient(){}$/;" f struct:anatomy::AnatomyOMXClient +~AnatomyOMXComponent AnatomyOMXComponent.cpp /^AnatomyOMXComponent::~AnatomyOMXComponent() {$/;" f class:android::AnatomyOMXComponent +~StdoutRenderer StdoutRenderer.h /^ virtual ~StdoutRenderer(){}$/;" f class:anatomy::StdoutRenderer diff --git a/multimedia/05_day/test-camera/Android.mk b/multimedia/05_day/test-camera/Android.mk new file mode 100644 index 0000000..61385e5 --- /dev/null +++ b/multimedia/05_day/test-camera/Android.mk @@ -0,0 +1,55 @@ +# Copyright 2013 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +LOCAL_PATH:= $(call my-dir) +include $(CLEAR_VARS) + +LOCAL_SRC_FILES:= \ + main.cpp \ + ProCameraTests.cpp \ + VendorTagDescriptorTests.cpp + +LOCAL_SHARED_LIBRARIES := \ + libutils \ + libcutils \ + libstlport \ + libcamera_metadata \ + libcamera_client \ + libgui \ + libsync \ + libui \ + libdl \ + libbinder + +LOCAL_STATIC_LIBRARIES := \ + libgtest + +LOCAL_C_INCLUDES += \ + bionic \ + bionic/libstdc++/include \ + external/gtest/include \ + external/stlport/stlport \ + system/media/camera/include \ + system/media/private/camera/include \ + system/media/camera/tests \ + frameworks/av/services/camera/libcameraservice \ + frameworks/av/include/camera \ + frameworks/native/include \ + +LOCAL_CFLAGS += -Wall -Wextra + +LOCAL_MODULE:= camera_client_test +LOCAL_MODULE_TAGS := tests + +include $(BUILD_NATIVE_TEST) diff --git a/multimedia/05_day/test-camera/ProCameraTests.cpp b/multimedia/05_day/test-camera/ProCameraTests.cpp new file mode 100644 index 0000000..04ffccf --- /dev/null +++ b/multimedia/05_day/test-camera/ProCameraTests.cpp @@ -0,0 +1,1314 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include + +#include +#include + +#include "Camera.h" +#include "ProCamera.h" +#include +#include +#include + +#include +#include + +#include +#include // for CAMERA2_TEMPLATE_PREVIEW only +#include + +#include + +namespace android { +namespace camera2 { +namespace tests { +namespace client { + +#define CAMERA_ID 0 +#define TEST_DEBUGGING 1 + +#define TEST_LISTENER_TIMEOUT 1000000000 // 1 second listener timeout +#define TEST_FORMAT HAL_PIXEL_FORMAT_Y16 //TODO: YUY2 instead + +#define TEST_FORMAT_MAIN HAL_PIXEL_FORMAT_Y8 +#define TEST_FORMAT_DEPTH HAL_PIXEL_FORMAT_Y16 + +// defaults for display "test" +#define TEST_DISPLAY_FORMAT HAL_PIXEL_FORMAT_Y8 +#define TEST_DISPLAY_WIDTH 320 +#define TEST_DISPLAY_HEIGHT 240 + +#define TEST_CPU_FRAME_COUNT 2 +#define TEST_CPU_HEAP_COUNT 5 + +#define TEST_FRAME_PROCESSING_DELAY_US 200000 // 200 ms + +#if TEST_DEBUGGING +#define dout std::cerr +#else +#define dout if (0) std::cerr +#endif + +#define EXPECT_OK(x) EXPECT_EQ(OK, (x)) +#define ASSERT_OK(x) ASSERT_EQ(OK, (x)) + +class ProCameraTest; + +struct ServiceListener : public BnCameraServiceListener { + + ServiceListener() : + mLatestStatus(STATUS_UNKNOWN), + mPrevStatus(STATUS_UNKNOWN) + { + } + + void onStatusChanged(Status status, int32_t cameraId) { + dout << "On status changed: 0x" << std::hex + << (unsigned int) status << " cameraId " << cameraId + << std::endl; + + Mutex::Autolock al(mMutex); + + mLatestStatus = status; + mCondition.broadcast(); + } + + status_t waitForStatusChange(Status& newStatus) { + Mutex::Autolock al(mMutex); + + if (mLatestStatus != mPrevStatus) { + newStatus = mLatestStatus; + mPrevStatus = mLatestStatus; + return OK; + } + + status_t stat = mCondition.waitRelative(mMutex, + TEST_LISTENER_TIMEOUT); + + if (stat == OK) { + newStatus = mLatestStatus; + mPrevStatus = mLatestStatus; + } + + 
return stat; + } + + Condition mCondition; + Mutex mMutex; + + Status mLatestStatus; + Status mPrevStatus; +}; + +enum ProEvent { + UNKNOWN, + ACQUIRED, + RELEASED, + STOLEN, + FRAME_RECEIVED, + RESULT_RECEIVED, +}; + +inline int ProEvent_Mask(ProEvent e) { + return (1 << static_cast(e)); +} + +typedef Vector EventList; + +class ProCameraTestThread : public Thread +{ +public: + ProCameraTestThread() { + } + + virtual bool threadLoop() { + mProc = ProcessState::self(); + mProc->startThreadPool(); + + IPCThreadState *ptr = IPCThreadState::self(); + + ptr->joinThreadPool(); + + return false; + } + + sp mProc; +}; + +class ProCameraTestListener : public ProCameraListener { + +public: + static const int EVENT_MASK_ALL = 0xFFFFFFFF; + + ProCameraTestListener() { + mEventMask = EVENT_MASK_ALL; + mDropFrames = false; + } + + status_t WaitForEvent() { + Mutex::Autolock cal(mConditionMutex); + + { + Mutex::Autolock al(mListenerMutex); + + if (mProEventList.size() > 0) { + return OK; + } + } + + return mListenerCondition.waitRelative(mConditionMutex, + TEST_LISTENER_TIMEOUT); + } + + /* Read events into out. Existing queue is flushed */ + void ReadEvents(EventList& out) { + Mutex::Autolock al(mListenerMutex); + + for (size_t i = 0; i < mProEventList.size(); ++i) { + out.push(mProEventList[i]); + } + + mProEventList.clear(); + } + + /** + * Dequeue 1 event from the event queue. + * Returns UNKNOWN if queue is empty + */ + ProEvent ReadEvent() { + Mutex::Autolock al(mListenerMutex); + + if (mProEventList.size() == 0) { + return UNKNOWN; + } + + ProEvent ev = mProEventList[0]; + mProEventList.removeAt(0); + + return ev; + } + + void SetEventMask(int eventMask) { + Mutex::Autolock al(mListenerMutex); + mEventMask = eventMask; + } + + // Automatically acquire/release frames as they are available + void SetDropFrames(bool dropFrames) { + Mutex::Autolock al(mListenerMutex); + mDropFrames = dropFrames; + } + +private: + void QueueEvent(ProEvent ev) { + bool eventAdded = false; + { + Mutex::Autolock al(mListenerMutex); + + // Drop events not part of mask + if (ProEvent_Mask(ev) & mEventMask) { + mProEventList.push(ev); + eventAdded = true; + } + } + + if (eventAdded) { + mListenerCondition.broadcast(); + } + } + +protected: + + ////////////////////////////////////////////////// + ///////// ProCameraListener ////////////////////// + ////////////////////////////////////////////////// + + + // Lock has been acquired. Write operations now available. + virtual void onLockAcquired() { + QueueEvent(ACQUIRED); + } + // Lock has been released with exclusiveUnlock + virtual void onLockReleased() { + QueueEvent(RELEASED); + } + + // Lock has been stolen by another client. + virtual void onLockStolen() { + QueueEvent(STOLEN); + } + + // Lock free. 
+ virtual void onTriggerNotify(int32_t ext1, int32_t ext2, int32_t ext3) { + + dout << "Trigger notify: " << ext1 << " " << ext2 + << " " << ext3 << std::endl; + } + + virtual void onFrameAvailable(int streamId, + const sp& consumer) { + + QueueEvent(FRAME_RECEIVED); + + Mutex::Autolock al(mListenerMutex); + if (mDropFrames) { + CpuConsumer::LockedBuffer buf; + status_t ret; + + if (OK == (ret = consumer->lockNextBuffer(&buf))) { + + dout << "Frame received on streamId = " << streamId << + ", dataPtr = " << (void*)buf.data << + ", timestamp = " << buf.timestamp << std::endl; + + EXPECT_OK(consumer->unlockBuffer(buf)); + } + } else { + dout << "Frame received on streamId = " << streamId << std::endl; + } + } + + virtual void onResultReceived(int32_t requestId, + camera_metadata* request) { + dout << "Result received requestId = " << requestId + << ", requestPtr = " << (void*)request << std::endl; + QueueEvent(RESULT_RECEIVED); + free_camera_metadata(request); + } + + virtual void notify(int32_t msg, int32_t ext1, int32_t ext2) { + dout << "Notify received: msg " << std::hex << msg + << ", ext1: " << std::hex << ext1 << ", ext2: " << std::hex << ext2 + << std::endl; + } + + Vector mProEventList; + Mutex mListenerMutex; + Mutex mConditionMutex; + Condition mListenerCondition; + int mEventMask; + bool mDropFrames; +}; + +class ProCameraTest : public ::testing::Test { + +public: + ProCameraTest() { + char* displaySecsEnv = getenv("TEST_DISPLAY_SECS"); + if (displaySecsEnv != NULL) { + mDisplaySecs = atoi(displaySecsEnv); + if (mDisplaySecs < 0) { + mDisplaySecs = 0; + } + } else { + mDisplaySecs = 100; + } + + char* displayFmtEnv = getenv("TEST_DISPLAY_FORMAT"); + if (displayFmtEnv != NULL) { + mDisplayFmt = FormatFromString(displayFmtEnv); + } else { + mDisplayFmt = TEST_DISPLAY_FORMAT; + } + + char* displayWidthEnv = getenv("TEST_DISPLAY_WIDTH"); + if (displayWidthEnv != NULL) { + mDisplayW = atoi(displayWidthEnv); + if (mDisplayW < 0) { + mDisplayW = 0; + } + } else { + mDisplayW = TEST_DISPLAY_WIDTH; + } + + char* displayHeightEnv = getenv("TEST_DISPLAY_HEIGHT"); + if (displayHeightEnv != NULL) { + mDisplayH = atoi(displayHeightEnv); + if (mDisplayH < 0) { + mDisplayH = 0; + } + } else { + mDisplayH = TEST_DISPLAY_HEIGHT; + } + } + + static void SetUpTestCase() { + // Binder Thread Pool Initialization + mTestThread = new ProCameraTestThread(); + mTestThread->run("ProCameraTestThread"); + } + + virtual void SetUp() { + mCamera = ProCamera::connect(CAMERA_ID); + ASSERT_NE((void*)NULL, mCamera.get()); + + mListener = new ProCameraTestListener(); + mCamera->setListener(mListener); + } + + virtual void TearDown() { + ASSERT_NE((void*)NULL, mCamera.get()); + mCamera->disconnect(); + } + +protected: + sp mCamera; + sp mListener; + + static sp mTestThread; + + int mDisplaySecs; + int mDisplayFmt; + int mDisplayW; + int mDisplayH; + + sp mComposerClient; + sp mSurfaceControl; + + sp mDepthComposerClient; + sp mDepthSurfaceControl; + + int getSurfaceWidth() { + return 512; + } + int getSurfaceHeight() { + return 512; + } + + void createOnScreenSurface(sp& surface) { + mComposerClient = new SurfaceComposerClient; + ASSERT_EQ(NO_ERROR, mComposerClient->initCheck()); + + mSurfaceControl = mComposerClient->createSurface( + String8("ProCameraTest StreamingImage Surface"), + 1280, 960, + PIXEL_FORMAT_RGB_888, 0); + + mSurfaceControl->setPosition(0, 0); + + ASSERT_TRUE(mSurfaceControl != NULL); + ASSERT_TRUE(mSurfaceControl->isValid()); + + SurfaceComposerClient::openGlobalTransaction(); + 
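+ // Everything between openGlobalTransaction() and closeGlobalTransaction()
+ // is applied to SurfaceFlinger as one atomic transaction; setLayer() with
+ // the maximum z-order (0x7FFFFFFF) keeps the preview surface on top.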
ASSERT_EQ(NO_ERROR, mSurfaceControl->setLayer(0x7FFFFFFF)); + ASSERT_EQ(NO_ERROR, mSurfaceControl->show()); + SurfaceComposerClient::closeGlobalTransaction(); + + sp window = mSurfaceControl->getSurface(); + surface = mSurfaceControl->getSurface(); + + ASSERT_NE((void*)NULL, surface.get()); + } + + void createDepthOnScreenSurface(sp& surface) { + mDepthComposerClient = new SurfaceComposerClient; + ASSERT_EQ(NO_ERROR, mDepthComposerClient->initCheck()); + + mDepthSurfaceControl = mDepthComposerClient->createSurface( + String8("ProCameraTest StreamingImage Surface"), + getSurfaceWidth(), getSurfaceHeight(), + PIXEL_FORMAT_RGB_888, 0); + + mDepthSurfaceControl->setPosition(640, 0); + + ASSERT_TRUE(mDepthSurfaceControl != NULL); + ASSERT_TRUE(mDepthSurfaceControl->isValid()); + + SurfaceComposerClient::openGlobalTransaction(); + ASSERT_EQ(NO_ERROR, mDepthSurfaceControl->setLayer(0x7FFFFFFF)); + ASSERT_EQ(NO_ERROR, mDepthSurfaceControl->show()); + SurfaceComposerClient::closeGlobalTransaction(); + + sp window = mDepthSurfaceControl->getSurface(); + surface = mDepthSurfaceControl->getSurface(); + + ASSERT_NE((void*)NULL, surface.get()); + } + + template + static bool ExistsItem(T needle, T* array, size_t count) { + if (!array) { + return false; + } + + for (size_t i = 0; i < count; ++i) { + if (array[i] == needle) { + return true; + } + } + return false; + } + + + static int FormatFromString(const char* str) { + std::string s(str); + +#define CMP_STR(x, y) \ + if (s == #x) return HAL_PIXEL_FORMAT_ ## y; +#define CMP_STR_SAME(x) CMP_STR(x, x) + + CMP_STR_SAME( Y16); + CMP_STR_SAME( Y8); + CMP_STR_SAME( YV12); + CMP_STR(NV16, YCbCr_422_SP); + CMP_STR(NV21, YCrCb_420_SP); + CMP_STR(YUY2, YCbCr_422_I); + CMP_STR(RAW, RAW_SENSOR); + CMP_STR(RGBA, RGBA_8888); + + std::cerr << "Unknown format string " << str << std::endl; + return -1; + + } + + /** + * Creating a streaming request for these output streams from a template, + * and submit it + */ + void createSubmitRequestForStreams(int32_t* streamIds, size_t count, int requestCount=-1) { + + ASSERT_NE((void*)NULL, streamIds); + ASSERT_LT(0u, count); + + camera_metadata_t *requestTmp = NULL; + EXPECT_OK(mCamera->createDefaultRequest(CAMERA2_TEMPLATE_PREVIEW, + /*out*/&requestTmp)); + ASSERT_NE((void*)NULL, requestTmp); + CameraMetadata request(requestTmp); + + // set the output streams. 
default is empty + + uint32_t tag = static_cast(ANDROID_REQUEST_OUTPUT_STREAMS); + request.update(tag, streamIds, count); + + requestTmp = request.release(); + + if (requestCount < 0) { + EXPECT_OK(mCamera->submitRequest(requestTmp, /*streaming*/true)); + } else { + for (int i = 0; i < requestCount; ++i) { + EXPECT_OK(mCamera->submitRequest(requestTmp, + /*streaming*/false)); + } + } + request.acquire(requestTmp); + } +}; + +sp ProCameraTest::mTestThread; + +#if 0 +TEST_F(ProCameraTest, AvailableFormats) { + if (HasFatalFailure()) { + return; + } + + CameraMetadata staticInfo = mCamera->getCameraInfo(CAMERA_ID); + ASSERT_FALSE(staticInfo.isEmpty()); + + uint32_t tag = static_cast(ANDROID_SCALER_AVAILABLE_FORMATS); + EXPECT_TRUE(staticInfo.exists(tag)); + camera_metadata_entry_t entry = staticInfo.find(tag); + + EXPECT_TRUE(ExistsItem(HAL_PIXEL_FORMAT_YV12, + entry.data.i32, entry.count)); + EXPECT_TRUE(ExistsItem(HAL_PIXEL_FORMAT_YCrCb_420_SP, + entry.data.i32, entry.count)); +} + +// test around exclusiveTryLock (immediate locking) +TEST_F(ProCameraTest, LockingImmediate) { + + if (HasFatalFailure()) { + return; + } + + mListener->SetEventMask(ProEvent_Mask(ACQUIRED) | + ProEvent_Mask(STOLEN) | + ProEvent_Mask(RELEASED)); + + EXPECT_FALSE(mCamera->hasExclusiveLock()); + EXPECT_EQ(OK, mCamera->exclusiveTryLock()); + // at this point we definitely have the lock + + EXPECT_EQ(OK, mListener->WaitForEvent()); + EXPECT_EQ(ACQUIRED, mListener->ReadEvent()); + + EXPECT_TRUE(mCamera->hasExclusiveLock()); + EXPECT_EQ(OK, mCamera->exclusiveUnlock()); + + EXPECT_EQ(OK, mListener->WaitForEvent()); + EXPECT_EQ(RELEASED, mListener->ReadEvent()); + + EXPECT_FALSE(mCamera->hasExclusiveLock()); +} + +// test around exclusiveLock (locking at some future point in time) +TEST_F(ProCameraTest, LockingAsynchronous) { + + if (HasFatalFailure()) { + return; + } + + + mListener->SetEventMask(ProEvent_Mask(ACQUIRED) | + ProEvent_Mask(STOLEN) | + ProEvent_Mask(RELEASED)); + + // TODO: Add another procamera that has a lock here. + // then we can be test that the lock wont immediately be acquired + + EXPECT_FALSE(mCamera->hasExclusiveLock()); + EXPECT_EQ(OK, mCamera->exclusiveTryLock()); + // at this point we definitely have the lock + + EXPECT_EQ(OK, mListener->WaitForEvent()); + EXPECT_EQ(ACQUIRED, mListener->ReadEvent()); + + EXPECT_TRUE(mCamera->hasExclusiveLock()); + EXPECT_EQ(OK, mCamera->exclusiveUnlock()); + + EXPECT_EQ(OK, mListener->WaitForEvent()); + EXPECT_EQ(RELEASED, mListener->ReadEvent()); + + EXPECT_FALSE(mCamera->hasExclusiveLock()); +} +#endif + +#if 1 +// Stream directly to the screen. +//TEST_F(ProCameraTest, DISABLED_StreamingImageSingle) { +TEST_F(ProCameraTest, StreamingImageSingle) { + if (HasFatalFailure()) { + return; + } + + sp surface; + if (mDisplaySecs > 0) { + createOnScreenSurface(/*out*/surface); + } + else { + dout << "Skipping, will not render to screen" << std::endl; + return; + } + + int depthStreamId = -1; + + sp listener = new ServiceListener(); + EXPECT_OK(ProCamera::addServiceListener(listener)); + + ServiceListener::Status currentStatus; + + // when subscribing a new listener, + // we immediately get a callback to the current status + while (listener->waitForStatusChange(/*out*/currentStatus) != OK); + EXPECT_EQ(ServiceListener::STATUS_PRESENT, currentStatus); + + dout << "Will now stream and resume infinitely..." 
<< std::endl; + while (true) { + + if (currentStatus == ServiceListener::STATUS_PRESENT) { + + //ASSERT_OK(mCamera->createStream(mDisplayW, mDisplayH, mDisplayFmt, + // surface, + // &depthStreamId)); + ASSERT_OK(mCamera->createStream(1280,960, + //HAL_PIXEL_FORMAT_YV12, + HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED, + surface, + &depthStreamId)); + EXPECT_NE(-1, depthStreamId); + + EXPECT_OK(mCamera->exclusiveTryLock()); + + int32_t streams[] = { depthStreamId }; + ASSERT_NO_FATAL_FAILURE(createSubmitRequestForStreams( + streams, + /*count*/1)); + } + + ServiceListener::Status stat = ServiceListener::STATUS_UNKNOWN; + + // TODO: maybe check for getch every once in a while? + while (listener->waitForStatusChange(/*out*/stat) != OK); + + if (currentStatus != stat) { + if (stat == ServiceListener::STATUS_PRESENT) { + dout << "Reconnecting to camera" << std::endl; + mCamera = ProCamera::connect(CAMERA_ID); + } else if (stat == ServiceListener::STATUS_NOT_AVAILABLE) { + dout << "Disconnecting from camera" << std::endl; + mCamera->disconnect(); + } else if (stat == ServiceListener::STATUS_NOT_PRESENT) { + dout << "Camera unplugged" << std::endl; + mCamera = NULL; + } else { + dout << "Unknown status change " + << std::hex << stat << std::endl; + } + + currentStatus = stat; + } + } + + EXPECT_OK(ProCamera::removeServiceListener(listener)); + EXPECT_OK(mCamera->deleteStream(depthStreamId)); + EXPECT_OK(mCamera->exclusiveUnlock()); +} +#endif + +#if 0 +// Stream directly to the screen. +//TEST_F(ProCameraTest, DISABLED_StreamingImageDual) { +TEST_F(ProCameraTest, StreamingImageDual) { + if (HasFatalFailure()) { + return; + } + sp surface; + sp depthSurface; + if (mDisplaySecs > 0) { + createOnScreenSurface(/*out*/surface); + createDepthOnScreenSurface(/*out*/depthSurface); + } + + int streamId = -1; + EXPECT_OK(mCamera->createStream(/*width*/1280, /*height*/960, + HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED, surface, &streamId)); + EXPECT_NE(-1, streamId); + + int depthStreamId = -1; + EXPECT_OK(mCamera->createStream(/*width*/320, /*height*/240, + HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED, depthSurface, &depthStreamId)); + EXPECT_NE(-1, depthStreamId); + + EXPECT_OK(mCamera->exclusiveTryLock()); + /* + */ + /* iterate in a loop submitting requests every frame. + * what kind of requests doesnt really matter, just whatever. + */ + + // it would probably be better to use CameraMetadata from camera service. + camera_metadata_t *request = NULL; + EXPECT_OK(mCamera->createDefaultRequest(CAMERA2_TEMPLATE_PREVIEW, + /*out*/&request)); + EXPECT_NE((void*)NULL, request); + + /*FIXME: dont need this later, at which point the above should become an + ASSERT_NE*/ + if(request == NULL) request = allocate_camera_metadata(10, 100); + + // set the output streams to just this stream ID + + // wow what a verbose API. + int32_t allStreams[] = { streamId, depthStreamId }; + // IMPORTANT. bad things will happen if its not a uint8. 
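+ // What follows is the raw camera_metadata way of attaching the output
+ // streams to the request: look up ANDROID_REQUEST_OUTPUT_STREAMS, add the
+ // entry if it is missing (growing the buffer via append when the in-place
+ // add fails), or update it in place when it already exists.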
+ size_t streamCount = sizeof(allStreams) / sizeof(allStreams[0]); + camera_metadata_entry_t entry; + uint32_t tag = static_cast(ANDROID_REQUEST_OUTPUT_STREAMS); + int find = find_camera_metadata_entry(request, tag, &entry); + if (find == -ENOENT) { + if (add_camera_metadata_entry(request, tag, &allStreams, + /*data_count*/streamCount) != OK) { + camera_metadata_t *tmp = allocate_camera_metadata(1000, 10000); + ASSERT_OK(append_camera_metadata(tmp, request)); + free_camera_metadata(request); + request = tmp; + + ASSERT_OK(add_camera_metadata_entry(request, tag, &allStreams, + /*data_count*/streamCount)); + } + } else { + ASSERT_OK(update_camera_metadata_entry(request, entry.index, + &allStreams, /*data_count*/streamCount, &entry)); + } + + EXPECT_OK(mCamera->submitRequest(request, /*streaming*/true)); + + dout << "will sleep now for " << mDisplaySecs << std::endl; + sleep(mDisplaySecs); + + free_camera_metadata(request); + + for (size_t i = 0; i < streamCount; ++i) { + EXPECT_OK(mCamera->deleteStream(allStreams[i])); + } + EXPECT_OK(mCamera->exclusiveUnlock()); +} +#endif + +#if 0 +TEST_F(ProCameraTest, CpuConsumerSingle) { + if (HasFatalFailure()) { + return; + } + + mListener->SetEventMask(ProEvent_Mask(ACQUIRED) | + ProEvent_Mask(STOLEN) | + ProEvent_Mask(RELEASED) | + ProEvent_Mask(FRAME_RECEIVED)); + mListener->SetDropFrames(true); + + int streamId = -1; + sp consumer; + EXPECT_OK(mCamera->createStreamCpu(/*width*/320, /*height*/240, + HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED, TEST_CPU_HEAP_COUNT, &consumer, &streamId)); + EXPECT_NE(-1, streamId); + + EXPECT_OK(mCamera->exclusiveTryLock()); + EXPECT_EQ(OK, mListener->WaitForEvent()); + EXPECT_EQ(ACQUIRED, mListener->ReadEvent()); + /* iterate in a loop submitting requests every frame. + * what kind of requests doesnt really matter, just whatever. + */ + + // it would probably be better to use CameraMetadata from camera service. 
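+ // The same raw-metadata dance is repeated below; createSubmitRequestForStreams()
+ // near the top of this fixture does the equivalent through the CameraMetadata
+ // wrapper, where it collapses into a single update() call.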
+ camera_metadata_t *request = NULL; + EXPECT_OK(mCamera->createDefaultRequest(CAMERA2_TEMPLATE_PREVIEW, + /*out*/&request)); + EXPECT_NE((void*)NULL, request); + + /*FIXME: dont need this later, at which point the above should become an + ASSERT_NE*/ + if(request == NULL) request = allocate_camera_metadata(10, 100); + + // set the output streams to just this stream ID + + int32_t allStreams[] = { streamId }; + camera_metadata_entry_t entry; + uint32_t tag = static_cast(ANDROID_REQUEST_OUTPUT_STREAMS); + int find = find_camera_metadata_entry(request, tag, &entry); + if (find == -ENOENT) { + if (add_camera_metadata_entry(request, tag, &allStreams, + /*data_count*/1) != OK) { + camera_metadata_t *tmp = allocate_camera_metadata(1000, 10000); + ASSERT_OK(append_camera_metadata(tmp, request)); + free_camera_metadata(request); + request = tmp; + + ASSERT_OK(add_camera_metadata_entry(request, tag, &allStreams, + /*data_count*/1)); + } + } else { + ASSERT_OK(update_camera_metadata_entry(request, entry.index, + &allStreams, /*data_count*/1, &entry)); + } + + EXPECT_OK(mCamera->submitRequest(request, /*streaming*/true)); + + // Consume a couple of frames + for (int i = 0; i < TEST_CPU_FRAME_COUNT; ++i) { + EXPECT_EQ(OK, mListener->WaitForEvent()); + EXPECT_EQ(FRAME_RECEIVED, mListener->ReadEvent()); + } + + // Done: clean up + free_camera_metadata(request); + EXPECT_OK(mCamera->deleteStream(streamId)); + EXPECT_OK(mCamera->exclusiveUnlock()); +} + +#endif + +#if 0 + +TEST_F(ProCameraTest, CpuConsumerDual) { + if (HasFatalFailure()) { + return; + } + + mListener->SetEventMask(ProEvent_Mask(FRAME_RECEIVED)); + mListener->SetDropFrames(true); + + int streamId = -1; + sp consumer; + EXPECT_OK(mCamera->createStreamCpu(/*width*/1280, /*height*/960, + TEST_FORMAT_MAIN, TEST_CPU_HEAP_COUNT, &consumer, &streamId)); + EXPECT_NE(-1, streamId); + + int depthStreamId = -1; + EXPECT_OK(mCamera->createStreamCpu(/*width*/320, /*height*/240, + TEST_FORMAT_DEPTH, TEST_CPU_HEAP_COUNT, &consumer, &depthStreamId)); + EXPECT_NE(-1, depthStreamId); + + EXPECT_OK(mCamera->exclusiveTryLock()); + /* + */ + /* iterate in a loop submitting requests every frame. + * what kind of requests doesnt really matter, just whatever. + */ + + // it would probably be better to use CameraMetadata from camera service. + camera_metadata_t *request = NULL; + EXPECT_OK(mCamera->createDefaultRequest(CAMERA2_TEMPLATE_PREVIEW, + /*out*/&request)); + EXPECT_NE((void*)NULL, request); + + if(request == NULL) request = allocate_camera_metadata(10, 100); + + // set the output streams to just this stream ID + + // wow what a verbose API. 
+ int32_t allStreams[] = { streamId, depthStreamId }; + size_t streamCount = 2; + camera_metadata_entry_t entry; + uint32_t tag = static_cast(ANDROID_REQUEST_OUTPUT_STREAMS); + int find = find_camera_metadata_entry(request, tag, &entry); + if (find == -ENOENT) { + if (add_camera_metadata_entry(request, tag, &allStreams, + /*data_count*/streamCount) != OK) { + camera_metadata_t *tmp = allocate_camera_metadata(1000, 10000); + ASSERT_OK(append_camera_metadata(tmp, request)); + free_camera_metadata(request); + request = tmp; + + ASSERT_OK(add_camera_metadata_entry(request, tag, &allStreams, + /*data_count*/streamCount)); + } + } else { + ASSERT_OK(update_camera_metadata_entry(request, entry.index, + &allStreams, /*data_count*/streamCount, &entry)); + } + + EXPECT_OK(mCamera->submitRequest(request, /*streaming*/true)); + + // Consume a couple of frames + for (int i = 0; i < TEST_CPU_FRAME_COUNT; ++i) { + // stream id 1 + EXPECT_EQ(OK, mListener->WaitForEvent()); + EXPECT_EQ(FRAME_RECEIVED, mListener->ReadEvent()); + + // stream id 2 + EXPECT_EQ(OK, mListener->WaitForEvent()); + EXPECT_EQ(FRAME_RECEIVED, mListener->ReadEvent()); + + //TODO: events should be a struct with some data like the stream id + } + + // Done: clean up + free_camera_metadata(request); + EXPECT_OK(mCamera->deleteStream(streamId)); + EXPECT_OK(mCamera->exclusiveUnlock()); +} +#endif + +#if 0 +TEST_F(ProCameraTest, ResultReceiver) { + if (HasFatalFailure()) { + return; + } + + mListener->SetEventMask(ProEvent_Mask(RESULT_RECEIVED)); + mListener->SetDropFrames(true); + //FIXME: if this is run right after the previous test we get FRAME_RECEIVED + // need to filter out events at read time + + int streamId = -1; + sp consumer; + EXPECT_OK(mCamera->createStreamCpu(/*width*/1280, /*height*/960, + TEST_FORMAT_MAIN, TEST_CPU_HEAP_COUNT, &consumer, &streamId)); + EXPECT_NE(-1, streamId); + + EXPECT_OK(mCamera->exclusiveTryLock()); + /* + */ + /* iterate in a loop submitting requests every frame. + * what kind of requests doesnt really matter, just whatever. 
+ */ + + camera_metadata_t *request = NULL; + EXPECT_OK(mCamera->createDefaultRequest(CAMERA2_TEMPLATE_PREVIEW, + /*out*/&request)); + EXPECT_NE((void*)NULL, request); + + /*FIXME*/ + if(request == NULL) request = allocate_camera_metadata(10, 100); + + // set the output streams to just this stream ID + + int32_t allStreams[] = { streamId }; + size_t streamCount = 1; + camera_metadata_entry_t entry; + uint32_t tag = static_cast(ANDROID_REQUEST_OUTPUT_STREAMS); + int find = find_camera_metadata_entry(request, tag, &entry); + if (find == -ENOENT) { + if (add_camera_metadata_entry(request, tag, &allStreams, + /*data_count*/streamCount) != OK) { + camera_metadata_t *tmp = allocate_camera_metadata(1000, 10000); + ASSERT_OK(append_camera_metadata(tmp, request)); + free_camera_metadata(request); + request = tmp; + + ASSERT_OK(add_camera_metadata_entry(request, tag, &allStreams, + /*data_count*/streamCount)); + } + } else { + ASSERT_OK(update_camera_metadata_entry(request, entry.index, + &allStreams, /*data_count*/streamCount, &entry)); + } + + EXPECT_OK(mCamera->submitRequest(request, /*streaming*/true)); + + // Consume a couple of results + for (int i = 0; i < TEST_CPU_FRAME_COUNT; ++i) { + EXPECT_EQ(OK, mListener->WaitForEvent()); + EXPECT_EQ(RESULT_RECEIVED, mListener->ReadEvent()); + } + + // Done: clean up + free_camera_metadata(request); + EXPECT_OK(mCamera->deleteStream(streamId)); + EXPECT_OK(mCamera->exclusiveUnlock()); +} +#endif + +#if 0 +// FIXME: This is racy and sometimes fails on waitForFrameMetadata +TEST_F(ProCameraTest, DISABLED_WaitForResult) { + if (HasFatalFailure()) { + return; + } + + mListener->SetDropFrames(true); + + int streamId = -1; + sp consumer; + EXPECT_OK(mCamera->createStreamCpu(/*width*/1280, /*height*/960, + TEST_FORMAT_MAIN, TEST_CPU_HEAP_COUNT, &consumer, &streamId)); + EXPECT_NE(-1, streamId); + + EXPECT_OK(mCamera->exclusiveTryLock()); + + int32_t streams[] = { streamId }; + ASSERT_NO_FATAL_FAILURE(createSubmitRequestForStreams(streams, /*count*/1)); + + // Consume a couple of results + for (int i = 0; i < TEST_CPU_FRAME_COUNT; ++i) { + EXPECT_OK(mCamera->waitForFrameMetadata()); + CameraMetadata meta = mCamera->consumeFrameMetadata(); + EXPECT_FALSE(meta.isEmpty()); + } + + // Done: clean up + EXPECT_OK(mCamera->deleteStream(streamId)); + EXPECT_OK(mCamera->exclusiveUnlock()); +} +#endif + +#if 0 +TEST_F(ProCameraTest, WaitForSingleStreamBuffer) { + if (HasFatalFailure()) { + return; + } + + int streamId = -1; + sp consumer; + EXPECT_OK(mCamera->createStreamCpu(/*width*/1280, /*height*/960, + HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED, TEST_CPU_HEAP_COUNT, &consumer, &streamId)); + EXPECT_NE(-1, streamId); + + EXPECT_OK(mCamera->exclusiveTryLock()); + + int32_t streams[] = { streamId }; + ASSERT_NO_FATAL_FAILURE(createSubmitRequestForStreams(streams, /*count*/1, + /*requests*/TEST_CPU_FRAME_COUNT)); + + // Consume a couple of results + for (int i = 0; i < TEST_CPU_FRAME_COUNT; ++i) { + EXPECT_EQ(1, mCamera->waitForFrameBuffer(streamId)); + + CpuConsumer::LockedBuffer buf; + EXPECT_OK(consumer->lockNextBuffer(&buf)); + + dout << "Buffer synchronously received on streamId = " << streamId << + ", dataPtr = " << (void*)buf.data << + ", timestamp = " << buf.timestamp << std::endl; + + EXPECT_OK(consumer->unlockBuffer(buf)); + } + + // Done: clean up + EXPECT_OK(mCamera->deleteStream(streamId)); + EXPECT_OK(mCamera->exclusiveUnlock()); +} +#endif + +#if 0 + +// FIXME: This is racy and sometimes fails on waitForFrameMetadata +TEST_F(ProCameraTest, 
DISABLED_WaitForDualStreamBuffer) { + if (HasFatalFailure()) { + return; + } + + const int REQUEST_COUNT = TEST_CPU_FRAME_COUNT * 10; + + // 15 fps + int streamId = -1; + sp consumer; + EXPECT_OK(mCamera->createStreamCpu(/*width*/1280, /*height*/960, + TEST_FORMAT_MAIN, TEST_CPU_HEAP_COUNT, &consumer, &streamId)); + EXPECT_NE(-1, streamId); + + // 30 fps + int depthStreamId = -1; + sp depthConsumer; + EXPECT_OK(mCamera->createStreamCpu(/*width*/320, /*height*/240, + TEST_FORMAT_DEPTH, TEST_CPU_HEAP_COUNT, &depthConsumer, &depthStreamId)); + EXPECT_NE(-1, depthStreamId); + + EXPECT_OK(mCamera->exclusiveTryLock()); + + int32_t streams[] = { streamId, depthStreamId }; + ASSERT_NO_FATAL_FAILURE(createSubmitRequestForStreams(streams, /*count*/2, + /*requests*/REQUEST_COUNT)); + + int depthFrames = 0; + int greyFrames = 0; + + // Consume two frames simultaneously. Unsynchronized by timestamps. + for (int i = 0; i < REQUEST_COUNT; ++i) { + + // Exhaust event queue so it doesn't keep growing + while (mListener->ReadEvent() != UNKNOWN); + + // Get the metadata + EXPECT_OK(mCamera->waitForFrameMetadata()); + CameraMetadata meta = mCamera->consumeFrameMetadata(); + EXPECT_FALSE(meta.isEmpty()); + + // Get the buffers + + EXPECT_EQ(1, mCamera->waitForFrameBuffer(depthStreamId)); + + /** + * Guaranteed to be able to consume the depth frame, + * since we waited on it. + */ + CpuConsumer::LockedBuffer depthBuffer; + EXPECT_OK(depthConsumer->lockNextBuffer(&depthBuffer)); + + dout << "Depth Buffer synchronously received on streamId = " << + streamId << + ", dataPtr = " << (void*)depthBuffer.data << + ", timestamp = " << depthBuffer.timestamp << std::endl; + + EXPECT_OK(depthConsumer->unlockBuffer(depthBuffer)); + + depthFrames++; + + + /** Consume Greyscale frames if there are any. 
+ * There may not be since it runs at half FPS */ + CpuConsumer::LockedBuffer greyBuffer; + while (consumer->lockNextBuffer(&greyBuffer) == OK) { + + dout << "GRAY Buffer synchronously received on streamId = " << + streamId << + ", dataPtr = " << (void*)greyBuffer.data << + ", timestamp = " << greyBuffer.timestamp << std::endl; + + EXPECT_OK(consumer->unlockBuffer(greyBuffer)); + + greyFrames++; + } + } + + dout << "Done, summary: depth frames " << std::dec << depthFrames + << ", grey frames " << std::dec << greyFrames << std::endl; + + // Done: clean up + EXPECT_OK(mCamera->deleteStream(streamId)); + EXPECT_OK(mCamera->exclusiveUnlock()); +} +#endif + +#if 0 +TEST_F(ProCameraTest, WaitForSingleStreamBufferAndDropFramesSync) { + if (HasFatalFailure()) { + return; + } + + const int NUM_REQUESTS = 20 * TEST_CPU_FRAME_COUNT; + + int streamId = -1; + sp consumer; + EXPECT_OK(mCamera->createStreamCpu(/*width*/1280, /*height*/960, + TEST_FORMAT_MAIN, TEST_CPU_HEAP_COUNT, + /*synchronousMode*/true, &consumer, &streamId)); + EXPECT_NE(-1, streamId); + + EXPECT_OK(mCamera->exclusiveTryLock()); + + int32_t streams[] = { streamId }; + ASSERT_NO_FATAL_FAILURE(createSubmitRequestForStreams(streams, /*count*/1, + /*requests*/NUM_REQUESTS)); + + // Consume a couple of results + for (int i = 0; i < NUM_REQUESTS; ++i) { + int numFrames; + EXPECT_TRUE((numFrames = mCamera->waitForFrameBuffer(streamId)) > 0); + + // Drop all but the newest framebuffer + EXPECT_EQ(numFrames-1, mCamera->dropFrameBuffer(streamId, numFrames-1)); + + dout << "Dropped " << (numFrames - 1) << " frames" << std::endl; + + // Skip the counter ahead, don't try to consume these frames again + i += numFrames-1; + + // "Consume" the buffer + CpuConsumer::LockedBuffer buf; + EXPECT_OK(consumer->lockNextBuffer(&buf)); + + dout << "Buffer synchronously received on streamId = " << streamId << + ", dataPtr = " << (void*)buf.data << + ", timestamp = " << buf.timestamp << std::endl; + + // Process at 10fps, stream is at 15fps. + // This means we will definitely fill up the buffer queue with + // extra buffers and need to drop them. 
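+ // 200 ms of simulated processing per frame is far longer than the ~66 ms
+ // frame interval of a 15 fps stream, so buffers pile up in the queue and
+ // dropFrameBuffer() above always has a backlog to discard.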
+        usleep(TEST_FRAME_PROCESSING_DELAY_US);
+
+        EXPECT_OK(consumer->unlockBuffer(buf));
+    }
+
+    // Done: clean up
+    EXPECT_OK(mCamera->deleteStream(streamId));
+    EXPECT_OK(mCamera->exclusiveUnlock());
+}
+#endif
+
+#if 0
+TEST_F(ProCameraTest, WaitForSingleStreamBufferAndDropFramesAsync) {
+    if (HasFatalFailure()) {
+        return;
+    }
+
+    const int NUM_REQUESTS = 20 * TEST_CPU_FRAME_COUNT;
+
+    int streamId = -1;
+    sp<CpuConsumer> consumer;
+    EXPECT_OK(mCamera->createStreamCpu(/*width*/1280, /*height*/960,
+                                       TEST_FORMAT_MAIN, TEST_CPU_HEAP_COUNT,
+                                       /*synchronousMode*/false, &consumer, &streamId));
+    EXPECT_NE(-1, streamId);
+
+    EXPECT_OK(mCamera->exclusiveTryLock());
+
+    int32_t streams[] = { streamId };
+    ASSERT_NO_FATAL_FAILURE(createSubmitRequestForStreams(streams, /*count*/1,
+                                                          /*requests*/NUM_REQUESTS));
+
+    uint64_t lastFrameNumber = 0;
+    int numFrames;
+
+    // Consume a couple of results
+    int i;
+    for (i = 0; i < NUM_REQUESTS && lastFrameNumber < NUM_REQUESTS; ++i) {
+        EXPECT_LT(0, (numFrames = mCamera->waitForFrameBuffer(streamId)));
+
+        dout << "Dropped " << (numFrames - 1) << " frames" << std::endl;
+
+        // Skip the counter ahead, don't try to consume these frames again
+        i += numFrames-1;
+
+        // "Consume" the buffer
+        CpuConsumer::LockedBuffer buf;
+
+        EXPECT_EQ(OK, consumer->lockNextBuffer(&buf));
+
+        lastFrameNumber = buf.frameNumber;
+
+        dout << "Buffer asynchronously received on streamId = " << streamId <<
+                ", dataPtr = " << (void*)buf.data <<
+                ", timestamp = " << buf.timestamp <<
+                ", framenumber = " << buf.frameNumber << std::endl;
+
+        // Process at 10fps, stream is at 15fps.
+        // This means we will definitely fill up the buffer queue with
+        // extra buffers and need to drop them.
+        usleep(TEST_FRAME_PROCESSING_DELAY_US);
+
+        EXPECT_OK(consumer->unlockBuffer(buf));
+    }
+
+    dout << "Done after " << i << " iterations " << std::endl;
+
+    // Done: clean up
+    EXPECT_OK(mCamera->deleteStream(streamId));
+    EXPECT_OK(mCamera->exclusiveUnlock());
+}
+#endif
+
+
+
+#if 0
+//TODO: refactor into separate file
+TEST_F(ProCameraTest, ServiceListenersSubscribe) {
+
+    ASSERT_EQ(4u, sizeof(ServiceListener::Status));
+
+    sp<ServiceListener> listener = new ServiceListener();
+
+    EXPECT_EQ(BAD_VALUE, ProCamera::removeServiceListener(listener));
+    EXPECT_OK(ProCamera::addServiceListener(listener));
+
+    EXPECT_EQ(ALREADY_EXISTS, ProCamera::addServiceListener(listener));
+    EXPECT_OK(ProCamera::removeServiceListener(listener));
+
+    EXPECT_EQ(BAD_VALUE, ProCamera::removeServiceListener(listener));
+}
+#endif
+
+#if 0
+//TODO: refactor into separate file
+TEST_F(ProCameraTest, ServiceListenersFunctional) {
+
+    sp<ServiceListener> listener = new ServiceListener();
+
+    EXPECT_OK(ProCamera::addServiceListener(listener));
+
+    sp<Camera> cam = Camera::connect(CAMERA_ID,
+                                     /*clientPackageName*/String16(),
+                                     -1);
+    EXPECT_NE((void*)NULL, cam.get());
+
+    ServiceListener::Status stat = ServiceListener::STATUS_UNKNOWN;
+    EXPECT_OK(listener->waitForStatusChange(/*out*/stat));
+
+    EXPECT_EQ(ServiceListener::STATUS_NOT_AVAILABLE, stat);
+
+    if (cam.get()) {
+        cam->disconnect();
+    }
+
+    EXPECT_OK(listener->waitForStatusChange(/*out*/stat));
+    EXPECT_EQ(ServiceListener::STATUS_PRESENT, stat);
+
+    EXPECT_OK(ProCamera::removeServiceListener(listener));
+}
+#endif
+
+
+
+}
+}
+}
+}
diff --git a/multimedia/05_day/test-camera/VendorTagDescriptorTests.cpp b/multimedia/05_day/test-camera/VendorTagDescriptorTests.cpp
new file mode 100644
index 0000000..3c0bb14
--- /dev/null
+++ b/multimedia/05_day/test-camera/VendorTagDescriptorTests.cpp
@@ -0,0 +1,205 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_NDEBUG 0
+#define LOG_TAG "VendorTagDescriptorTests"
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+
+using namespace android;
+
+enum {
+    BAD_TAG_ARRAY = 0xDEADBEEFu,
+    BAD_TAG = 0x8DEADBADu,
+};
+
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+
+static bool ContainsTag(uint32_t* tagArray, size_t size, uint32_t tag) {
+    for (size_t i = 0; i < size; ++i) {
+        if (tag == tagArray[i]) return true;
+    }
+    return false;
+}
+
+#define EXPECT_CONTAINS_TAG(t, a) \
+    EXPECT_TRUE(ContainsTag(a, ARRAY_SIZE(a), t))
+
+#define ASSERT_NOT_NULL(x) \
+    ASSERT_TRUE((x) != NULL)
+
+extern "C" {
+
+static int default_get_tag_count(const vendor_tag_ops_t* vOps) {
+    return VENDOR_TAG_COUNT_ERR;
+}
+
+static void default_get_all_tags(const vendor_tag_ops_t* vOps, uint32_t* tagArray) {
+    // No-op
+}
+
+static const char* default_get_section_name(const vendor_tag_ops_t* vOps, uint32_t tag) {
+    return VENDOR_SECTION_NAME_ERR;
+}
+
+static const char* default_get_tag_name(const vendor_tag_ops_t* vOps, uint32_t tag) {
+    return VENDOR_TAG_NAME_ERR;
+}
+
+static int default_get_tag_type(const vendor_tag_ops_t* vOps, uint32_t tag) {
+    return VENDOR_TAG_TYPE_ERR;
+}
+
+} /*extern "C"*/
+
+// Set default vendor operations for a vendor_tag_ops struct
+static void FillWithDefaults(vendor_tag_ops_t* vOps) {
+    ASSERT_NOT_NULL(vOps);
+    vOps->get_tag_count = default_get_tag_count;
+    vOps->get_all_tags = default_get_all_tags;
+    vOps->get_section_name = default_get_section_name;
+    vOps->get_tag_name = default_get_tag_name;
+    vOps->get_tag_type = default_get_tag_type;
+}
+
+/**
+ * Test if values from VendorTagDescriptor methods match corresponding values
+ * from vendor_tag_ops functions.
+ */
+#if 0
+TEST(VendorTagDescriptorTest, ConsistentWithVendorTags) {
+    sp<VendorTagDescriptor> vDesc;
+    const vendor_tag_ops_t *vOps = &fakevendor_ops;
+    EXPECT_EQ(OK, VendorTagDescriptor::createDescriptorFromOps(vOps, /*out*/vDesc));
+
+    ASSERT_NOT_NULL(vDesc);
+
+    // Ensure reasonable tag count
+    int tagCount = vDesc->getTagCount();
+    EXPECT_EQ(tagCount, vOps->get_tag_count(vOps));
+
+    uint32_t descTagArray[tagCount];
+    uint32_t opsTagArray[tagCount];
+
+    // Get all tag ids
+    vDesc->getTagArray(descTagArray);
+    vOps->get_all_tags(vOps, opsTagArray);
+
+    ASSERT_NOT_NULL(descTagArray);
+    ASSERT_NOT_NULL(opsTagArray);
+
+    uint32_t tag;
+    for (int i = 0; i < tagCount; ++i) {
+        // For each tag id, check whether type, section name, tag name match
+        tag = descTagArray[i];
+        EXPECT_CONTAINS_TAG(tag, opsTagArray);
+        EXPECT_EQ(vDesc->getTagType(tag), vOps->get_tag_type(vOps, tag));
+        EXPECT_STREQ(vDesc->getSectionName(tag), vOps->get_section_name(vOps, tag));
+        EXPECT_STREQ(vDesc->getTagName(tag), vOps->get_tag_name(vOps, tag));
+    }
+}
+
+/**
+ * Test if values from VendorTagDescriptor methods stay consistent after being
+ * parcelled/unparcelled.
+ */
+TEST(VendorTagDescriptorTest, ConsistentAcrossParcel) {
+    sp<VendorTagDescriptor> vDescOriginal, vDescParceled;
+    const vendor_tag_ops_t *vOps = &fakevendor_ops;
+    EXPECT_EQ(OK, VendorTagDescriptor::createDescriptorFromOps(vOps, /*out*/vDescOriginal));
+
+    ASSERT_TRUE(vDescOriginal != NULL);
+
+    Parcel p;
+
+    // Check whether parcel read/write succeed
+    EXPECT_EQ(OK, vDescOriginal->writeToParcel(&p));
+    p.setDataPosition(0);
+    ASSERT_EQ(OK, VendorTagDescriptor::createFromParcel(&p, vDescParceled));
+
+    // Ensure consistent tag count
+    int tagCount = vDescOriginal->getTagCount();
+    ASSERT_EQ(tagCount, vDescParceled->getTagCount());
+
+    uint32_t descTagArray[tagCount];
+    uint32_t desc2TagArray[tagCount];
+
+    // Get all tag ids
+    vDescOriginal->getTagArray(descTagArray);
+    vDescParceled->getTagArray(desc2TagArray);
+
+    ASSERT_NOT_NULL(descTagArray);
+    ASSERT_NOT_NULL(desc2TagArray);
+
+    uint32_t tag;
+    for (int i = 0; i < tagCount; ++i) {
+        // For each tag id, check consistency between the two vendor tag
+        // descriptors for each type, section name, tag name
+        tag = descTagArray[i];
+        EXPECT_CONTAINS_TAG(tag, desc2TagArray);
+        EXPECT_EQ(vDescOriginal->getTagType(tag), vDescParceled->getTagType(tag));
+        EXPECT_STREQ(vDescOriginal->getSectionName(tag), vDescParceled->getSectionName(tag));
+        EXPECT_STREQ(vDescOriginal->getTagName(tag), vDescParceled->getTagName(tag));
+    }
+}
+
+/**
+ * Test defaults and error conditions.
+ */
+TEST(VendorTagDescriptorTest, ErrorConditions) {
+    sp<VendorTagDescriptor> vDesc;
+    vendor_tag_ops_t vOps;
+    FillWithDefaults(&vOps);
+
+    // Ensure create fails when using null vOps
+    EXPECT_EQ(BAD_VALUE, VendorTagDescriptor::createDescriptorFromOps(/*vOps*/NULL, vDesc));
+
+    // Ensure create works when there are no vtags defined in a well-formed vOps
+    ASSERT_EQ(OK, VendorTagDescriptor::createDescriptorFromOps(&vOps, vDesc));
+
+    // Ensure defaults are returned when no vtags are defined, or tag is unknown
+    EXPECT_EQ(VENDOR_TAG_COUNT_ERR, vDesc->getTagCount());
+    uint32_t* tagArray = reinterpret_cast<uint32_t*>(BAD_TAG_ARRAY);
+    uint32_t* testArray = tagArray;
+    vDesc->getTagArray(tagArray);
+    EXPECT_EQ(testArray, tagArray);
+    EXPECT_EQ(VENDOR_SECTION_NAME_ERR, vDesc->getSectionName(BAD_TAG));
+    EXPECT_EQ(VENDOR_TAG_NAME_ERR, vDesc->getTagName(BAD_TAG));
+    EXPECT_EQ(VENDOR_TAG_TYPE_ERR, vDesc->getTagType(BAD_TAG));
+
+    // Make sure global can be set/cleared
+    const vendor_tag_ops_t *fakeOps = &fakevendor_ops;
+    sp<VendorTagDescriptor> prevGlobal = VendorTagDescriptor::getGlobalVendorTagDescriptor();
+    VendorTagDescriptor::clearGlobalVendorTagDescriptor();
+
+    EXPECT_TRUE(VendorTagDescriptor::getGlobalVendorTagDescriptor() == NULL);
+    EXPECT_EQ(OK, VendorTagDescriptor::setAsGlobalVendorTagDescriptor(vDesc));
+    EXPECT_TRUE(VendorTagDescriptor::getGlobalVendorTagDescriptor() != NULL);
+    EXPECT_EQ(VENDOR_SECTION_NAME_ERR, vDesc->getSectionName(BAD_TAG));
+    EXPECT_EQ(OK, VendorTagDescriptor::setAsGlobalVendorTagDescriptor(prevGlobal));
+    EXPECT_EQ(prevGlobal, VendorTagDescriptor::getGlobalVendorTagDescriptor());
+}
+#endif
diff --git a/multimedia/05_day/test-camera/main.cpp b/multimedia/05_day/test-camera/main.cpp
new file mode 100644
index 0000000..8c8c515
--- /dev/null
+++ b/multimedia/05_day/test-camera/main.cpp
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gtest/gtest.h>
+
+
+int main(int argc, char **argv) {
+
+    ::testing::InitGoogleTest(&argc, argv);
+
+    int ret = RUN_ALL_TESTS();
+
+    return ret;
+}
diff --git a/multimedia/05_day/test-player/Android.mk b/multimedia/05_day/test-player/Android.mk
new file mode 100644
index 0000000..04b6f2d
--- /dev/null
+++ b/multimedia/05_day/test-player/Android.mk
@@ -0,0 +1,10 @@
+LOCAL_PATH := $(call my-dir)
+
+include $(CLEAR_VARS)
+
+LOCAL_MODULE := test-player
+LOCAL_SRC_FILES := player.cpp
+LOCAL_SHARED_LIBRARIES := libutils libui libgui libmedia libbinder
+
+include $(BUILD_EXECUTABLE)
+
diff --git a/multimedia/05_day/test-player/a.mp4 b/multimedia/05_day/test-player/a.mp4
new file mode 100644
index 0000000..33793dd
Binary files /dev/null and b/multimedia/05_day/test-player/a.mp4 differ
diff --git a/multimedia/05_day/test-player/player.cpp b/multimedia/05_day/test-player/player.cpp
new file mode 100644
index 0000000..4ba2f9d
--- /dev/null
+++ b/multimedia/05_day/test-player/player.cpp
@@ -0,0 +1,86 @@
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+using namespace android;
+
+int main(int argc, char **argv)
+{
+    sp<ProcessState> proc = ProcessState::self();
+    proc->startThreadPool();
+    printf("entering main...\n");
+    MediaPlayer mediaplayer;
+    sp<Surface> gsf;
+    status_t nState;
+    int fd, fsize;
+
+    fd = open( argv[1], O_RDONLY );
+    if( fd < 0 )
+    {
+        perror("open()");
+        exit(-1);
+    }
+    fsize = lseek( fd, 0, SEEK_END );
+    printf("fsize=%d\n", fsize );
+    mediaplayer.setDataSource(fd, 0, fsize);
+    close(fd);
+
+    printf("create SurfaceComposerClient\n");
+
+    sp<SurfaceComposerClient> composerClient;
+    sp<SurfaceControl> control;
+
+    composerClient = new SurfaceComposerClient;
+    composerClient->initCheck();
+
+    printf("create video surface\n");
+
+    control = composerClient->createSurface(
+            String8("A Surface"),
+            1280,
+            800,
+            PIXEL_FORMAT_RGBA_8888,
+            0);
+
+    SurfaceComposerClient::openGlobalTransaction();
+    control->setLayer(INT_MAX);
+    control->show();
+    SurfaceComposerClient::closeGlobalTransaction();
+
+
+    gsf = control->getSurface();
+    sp<IGraphicBufferProducer> videoSurfaceTexture = gsf->getIGraphicBufferProducer();
+    printf("set video surface to player\n");
+
+    mediaplayer.setVideoSurfaceTexture(videoSurfaceTexture);
+
+    status_t retCode = mediaplayer.prepare();
+
+    if (retCode < 0)
+    {
+        printf("prepare failed: %d\n", retCode);
+        IPCThreadState::self()->stopProcess(0);
+        return -1;
+    };
+
+    mediaplayer.start();
+    for (int i = 0; i < 4*60; i++)
+    {
+        sleep(1);
+    }
+    mediaplayer.reset();
+
+    // Close the binder fd; do we still need to wait for all binder threads to exit?
+    IPCThreadState::self()->stopProcess(0);
+    return 0;
+}
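
Note (not part of the commit above): player.cpp simply sleeps for up to four minutes and then resets, regardless of how long a.mp4 actually plays. One hedged alternative is to register a listener with the MediaPlayer and block until the playback-complete event arrives. The sketch below assumes the MediaPlayerListener interface and the MEDIA_PLAYBACK_COMPLETE event constant from media/mediaplayer.h; exact header locations and signatures vary between Android releases, so treat this as an illustration rather than the course's implementation.

// completion_wait_sketch.cpp -- illustrative only, assuming the native MediaPlayerListener API.
#include <media/mediaplayer.h>   // MediaPlayer, MediaPlayerListener, MEDIA_PLAYBACK_COMPLETE
#include <utils/Mutex.h>
#include <utils/Condition.h>

using namespace android;

// Listener that signals a condition variable once the player reports
// that playback has finished.
class CompletionListener : public MediaPlayerListener {
public:
    CompletionListener() : mDone(false) {}

    // Called by MediaPlayer for every player event.
    virtual void notify(int msg, int /*ext1*/, int /*ext2*/, const Parcel* /*obj*/) {
        if (msg == MEDIA_PLAYBACK_COMPLETE) {
            Mutex::Autolock lock(mLock);
            mDone = true;
            mCondition.signal();
        }
    }

    // Block the caller until MEDIA_PLAYBACK_COMPLETE has been received.
    void waitForCompletion() {
        Mutex::Autolock lock(mLock);
        while (!mDone) {
            mCondition.wait(mLock);
        }
    }

private:
    Mutex mLock;
    Condition mCondition;
    bool mDone;
};

// Possible usage inside a player like the one above (sketch):
//     sp<CompletionListener> listener = new CompletionListener();
//     mediaplayer.setListener(listener);
//     mediaplayer.start();
//     listener->waitForCompletion();   // instead of the fixed sleep(1) loop
//     mediaplayer.reset();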