# -*- mode: python -*-
#
# Copyright (C) 2015-2019 Zoltán Kővágó <DirtY.iCE.hu@gmail.com>
#
# This work is licensed under the terms of the GNU GPL, version 2 or later.
# See the COPYING file in the top-level directory.
##
# = Audio
##
##
# @AudiodevPerDirectionOptions:
#
# General audio backend options that are used for both playback and
# recording.
#
# @mixing-engine: use QEMU's mixing engine to mix all streams inside QEMU and
# convert audio formats when not supported by the backend. When
# set to off, fixed-settings must also be off (default on,
# since 4.2)
#
# @fixed-settings: use fixed settings for host input/output. When off,
# frequency, channels and format must not be
# specified (default true)
#
# @frequency: frequency to use when using fixed settings
# (default 44100)
#
# @channels: number of channels when using fixed settings (default 2)
#
# @voices: number of voices to use (default 1)
#
# @format: sample format to use when using fixed settings
# (default s16)
#
# @buffer-length: the buffer length in microseconds
#
# Since: 4.0
##
{ 'struct': 'AudiodevPerDirectionOptions',
  'data': {
    '*mixing-engine': 'bool',
    '*fixed-settings': 'bool',
    '*frequency': 'uint32',
    '*channels': 'uint32',
    '*voices': 'uint32',
    '*format': 'AudioFormat',
    '*buffer-length': 'uint32' } }
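
# Example (illustration only, not part of the schema): these per-direction
# options are typically given with the -audiodev command-line option, with
# member names prefixed by "in." or "out.".  The id "snd0" and the values
# below are placeholders; a PulseAudio host backend is assumed:
#
#   -audiodev pa,id=snd0,out.fixed-settings=on,out.frequency=48000,out.channels=2,out.format=s16
#
#   or, bypassing QEMU's mixing engine entirely:
#
#   -audiodev pa,id=snd0,out.mixing-engine=off,out.fixed-settings=off
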
##
# @AudiodevGenericOptions:
#
# Generic driver-specific options.
#
# @in: options of the capture stream
#
# @out: options of the playback stream
#
# Since: 4.0
##
{ 'struct': 'AudiodevGenericOptions',
  'data': {
    '*in': 'AudiodevPerDirectionOptions',
    '*out': 'AudiodevPerDirectionOptions' } }
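
# Example (illustration only): drivers mapped to AudiodevGenericOptions, such
# as 'none', accept only the generic per-direction options.  The id "snd0" is
# a placeholder:
#
#   -audiodev none,id=snd0
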
##
# @AudiodevAlsaPerDirectionOptions:
#
# Options of the ALSA backend that are used for both playback and
# recording.
#
# @dev: the name of the ALSA device to use (default 'default')
#
# @period-length: the period length in microseconds
#
# @try-poll: attempt to use poll mode, falling back to non-polling
# access on failure (default true)
#
# Since: 4.0
##
{ 'struct': 'AudiodevAlsaPerDirectionOptions',
  'base': 'AudiodevPerDirectionOptions',
  'data': {
    '*dev': 'str',
    '*period-length': 'uint32',
    '*try-poll': 'bool' } }
##
# @AudiodevAlsaOptions:
#
# Options of the ALSA audio backend.
#
# @in: options of the capture stream
#
# @out: options of the playback stream
#
# @threshold: set the threshold (in microseconds) when playback starts
#
# Since: 4.0
##
{ 'struct': 'AudiodevAlsaOptions',
  'data': {
    '*in': 'AudiodevAlsaPerDirectionOptions',
    '*out': 'AudiodevAlsaPerDirectionOptions',
    '*threshold': 'uint32' } }
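
# Example (illustration only; the device name and values are placeholders):
#
#   -audiodev alsa,id=snd0,out.dev=default,out.period-length=5000,out.try-poll=off
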
##
# @AudiodevCoreaudioPerDirectionOptions:
#
# Options of the Core Audio backend that are used for both playback and
# recording.
#
# @buffer-count: number of buffers
#
# Since: 4.0
##
{ 'struct': 'AudiodevCoreaudioPerDirectionOptions',
  'base': 'AudiodevPerDirectionOptions',
  'data': {
    '*buffer-count': 'uint32' } }
##
# @AudiodevCoreaudioOptions:
#
# Options of the coreaudio audio backend.
#
# @in: options of the capture stream
#
# @out: options of the playback stream
#
# Since: 4.0
##
{ 'struct': 'AudiodevCoreaudioOptions',
  'data': {
    '*in': 'AudiodevCoreaudioPerDirectionOptions',
    '*out': 'AudiodevCoreaudioPerDirectionOptions' } }
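
# Example (illustration only; the values are placeholders, buffer-length comes
# from the common per-direction options):
#
#   -audiodev coreaudio,id=snd0,out.buffer-count=4,out.buffer-length=50000
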
##
# @AudiodevDsoundOptions:
#
# Options of the DirectSound audio backend.
#
# @in: options of the capture stream
#
# @out: options of the playback stream
#
# @latency: add extra latency to playback in microseconds
# (default 10000)
#
# Since: 4.0
##
{ 'struct': 'AudiodevDsoundOptions',
  'data': {
    '*in': 'AudiodevPerDirectionOptions',
    '*out': 'AudiodevPerDirectionOptions',
    '*latency': 'uint32' } }
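
# Example (illustration only; the latency value is a placeholder):
#
#   -audiodev dsound,id=snd0,latency=20000
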
##
# @AudiodevJackPerDirectionOptions:
#
# Options of the JACK backend that are used for both playback and
# recording.
#
# @server-name: select from among several possible concurrent server instances
# (default: environment variable $JACK_DEFAULT_SERVER if set, else "default")
#
# @client-name: the client name to use. The server will modify this name to
# create a unique variant, if needed, unless @exact-name is true (default: the
# guest's name)
#
# @connect-ports: if set, a regular expression of JACK client port name(s) to
# monitor for and automatically connect to
#
# @start-server: start a JACK server process if one is not already present
# (default: false)
#
# @exact-name: use the exact name requested otherwise JACK automatically
# generates a unique one, if needed (default: false)
#
# Since: 5.1
##
{ 'struct': 'AudiodevJackPerDirectionOptions',
  'base': 'AudiodevPerDirectionOptions',
  'data': {
    '*server-name': 'str',
    '*client-name': 'str',
    '*connect-ports': 'str',
    '*start-server': 'bool',
    '*exact-name': 'bool' } }
##
# @AudiodevJackOptions:
#
# Options of the JACK audio backend.
#
# @in: options of the capture stream
#
# @out: options of the playback stream
#
# Since: 5.1
##
{ 'struct': 'AudiodevJackOptions',
  'data': {
    '*in': 'AudiodevJackPerDirectionOptions',
    '*out': 'AudiodevJackPerDirectionOptions' } }
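
# Example (illustration only; the client name and port pattern are
# placeholders):
#
#   -audiodev jack,id=snd0,out.client-name=qemu-vm,out.connect-ports=system:playback_1,out.start-server=off
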
##
# @AudiodevOssPerDirectionOptions:
#
# Options of the OSS backend that are used for both playback and
# recording.
#
# @dev: file name of the OSS device (default '/dev/dsp')
#
# @buffer-count: number of buffers
#
# @try-poll: attempt to use poll mode, falling back to non-polling
# access on failure (default true)
#
# Since: 4.0
##
{ 'struct': 'AudiodevOssPerDirectionOptions',
  'base': 'AudiodevPerDirectionOptions',
  'data': {
    '*dev': 'str',
    '*buffer-count': 'uint32',
    '*try-poll': 'bool' } }
##
# @AudiodevOssOptions:
#
# Options of the OSS audio backend.
#
# @in: options of the capture stream
#
# @out: options of the playback stream
#
# @try-mmap: try using memory-mapped access, falling back to
# non-memory-mapped access on failure (default true)
#
# @exclusive: open device in exclusive mode (vmix won't work)
# (default false)
#
# @dsp-policy: set the timing policy of the device (between 0 and 10,
# where a smaller number means lower latency but higher
# CPU usage) or -1 to use fragment mode (option ignored
# on some platforms) (default 5)
#
# Since: 4.0
##
{ 'struct': 'AudiodevOssOptions',
  'data': {
    '*in': 'AudiodevOssPerDirectionOptions',
    '*out': 'AudiodevOssPerDirectionOptions',
    '*try-mmap': 'bool',
    '*exclusive': 'bool',
    '*dsp-policy': 'uint32' } }
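
# Example (illustration only; the device path and values are placeholders,
# note that try-mmap and dsp-policy are not per-direction options):
#
#   -audiodev oss,id=snd0,out.dev=/dev/dsp1,try-mmap=on,dsp-policy=3
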
##
# @AudiodevPaPerDirectionOptions:
#
# Options of the PulseAudio backend that are used for both playback and
# recording.
#
# @name: name of the sink/source to use
#
# @stream-name: name of the PulseAudio stream created by QEMU. Can be
# used to identify the stream in PulseAudio when you
# create multiple PulseAudio devices or run multiple QEMU
# instances (default: audiodev's id, since 4.2)
#
# @latency: latency you want PulseAudio to achieve in microseconds
# (default 15000)
#
# Since: 4.0
##
{ 'struct': 'AudiodevPaPerDirectionOptions',
  'base': 'AudiodevPerDirectionOptions',
  'data': {
    '*name': 'str',
    '*stream-name': 'str',
    '*latency': 'uint32' } }
##
# @AudiodevPaOptions:
#
# Options of the PulseAudio audio backend.
#
# @in: options of the capture stream
#
# @out: options of the playback stream
#
# @server: PulseAudio server address (default: let PulseAudio choose)
#
# Since: 4.0
##
{ 'struct': 'AudiodevPaOptions',
  'data': {
    '*in': 'AudiodevPaPerDirectionOptions',
    '*out': 'AudiodevPaPerDirectionOptions',
    '*server': 'str' } }
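
# Example (illustration only; the server address, stream name and latency are
# placeholders):
#
#   -audiodev pa,id=snd0,server=localhost,out.stream-name=vm0,out.latency=20000
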
##
# @AudiodevSdlPerDirectionOptions:
#
# Options of the SDL audio backend that are used for both playback and
# recording.
#
# @buffer-count: number of buffers (default 4)
#
# Since: 6.0
##
{ 'struct': 'AudiodevSdlPerDirectionOptions',
  'base': 'AudiodevPerDirectionOptions',
  'data': {
    '*buffer-count': 'uint32' } }
##
# @AudiodevSdlOptions:
#
# Options of the SDL audio backend.
#
# @in: options of the recording stream
#
# @out: options of the playback stream
#
# Since: 6.0
##
{ 'struct': 'AudiodevSdlOptions',
  'data': {
    '*in': 'AudiodevSdlPerDirectionOptions',
    '*out': 'AudiodevSdlPerDirectionOptions' } }
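
# Example (illustration only; the buffer count is a placeholder):
#
#   -audiodev sdl,id=snd0,out.buffer-count=8
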
##
# @AudiodevWavOptions:
#
# Options of the wav audio backend.
#
# @in: options of the capture stream
#
# @out: options of the playback stream
#
# @path: name of the wav file to record (default 'qemu.wav')
#
# Since: 4.0
##
{ 'struct': 'AudiodevWavOptions',
  'data': {
    '*in': 'AudiodevPerDirectionOptions',
    '*out': 'AudiodevPerDirectionOptions',
    '*path': 'str' } }
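
# Example (illustration only; the file name is a placeholder):
#
#   -audiodev wav,id=snd0,path=/tmp/vm-audio.wav
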
##
# @AudioFormat:
#
# An enumeration of possible audio formats.
#
# @u8: unsigned 8 bit integer
#
# @s8: signed 8 bit integer
#
# @u16: unsigned 16 bit integer
#
# @s16: signed 16 bit integer
#
# @u32: unsigned 32 bit integer
#
# @s32: signed 32 bit integer
#
# @f32: single precision floating-point (since 5.0)
#
# Since: 4.0
##
{ 'enum': 'AudioFormat',
  'data': [ 'u8', 's8', 'u16', 's16', 'u32', 's32', 'f32' ] }
##
# @AudiodevDriver:
#
# An enumeration of possible audio backend drivers.
#
# @jack: JACK audio backend (since 5.1)
#
# Since: 4.0
##
{ 'enum': 'AudiodevDriver',
  'data': [ 'none', 'alsa', 'coreaudio', 'dbus', 'dsound', 'jack', 'oss', 'pa',
            'sdl', 'spice', 'wav' ] }
##
# @Audiodev:
#
# Options of an audio backend.
#
# @id: identifier of the backend
#
# @driver: the backend driver to use
#
# @timer-period: timer period (in microseconds, 0: use lowest possible)
#
# Since: 4.0
##
{ 'union': 'Audiodev',
  'base': {
    'id': 'str',
    'driver': 'AudiodevDriver',
    '*timer-period': 'uint32' },
  'discriminator': 'driver',
  'data': {
    'none': 'AudiodevGenericOptions',
    'alsa': 'AudiodevAlsaOptions',
    'coreaudio': 'AudiodevCoreaudioOptions',
    'dbus': 'AudiodevGenericOptions',
    'dsound': 'AudiodevDsoundOptions',
    'jack': 'AudiodevJackOptions',
    'oss': 'AudiodevOssOptions',
    'pa': 'AudiodevPaOptions',
    'sdl': 'AudiodevSdlOptions',
    'spice': 'AudiodevGenericOptions',
    'wav': 'AudiodevWavOptions' } }
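
# Example (illustration only): on the command line this flat union maps to a
# single -audiodev option; the driver name selects which branch's options are
# accepted, and a sound frontend is tied to the backend through its audiodev=
# property.  The id "snd0" and all values are placeholders:
#
#   -audiodev pa,id=snd0,timer-period=5000,out.latency=15000
#   -device intel-hda -device hda-duplex,audiodev=snd0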