Agora Java API Reference for Android
Public Member Functions
abstract int | setChannelProfile (int profile) |
abstract int | setClientRole (int role) |
abstract int | setClientRole (int role, ClientRoleOptions options) |
abstract int | sendCustomReportMessage (String id, String category, String event, String label, int value) |
abstract int | joinChannel (String token, String channelName, String optionalInfo, int optionalUid) |
abstract int | joinChannel (String token, String channelName, String optionalInfo, int optionalUid, ChannelMediaOptions options) |
abstract int | switchChannel (String token, String channelName) |
abstract int | switchChannel (String token, String channelName, ChannelMediaOptions options) |
abstract int | leaveChannel () |
abstract int | renewToken (String token) |
abstract int | registerLocalUserAccount (String appId, String userAccount) |
abstract int | joinChannelWithUserAccount (String token, String channelName, String userAccount) |
abstract int | joinChannelWithUserAccount (String token, String channelName, String userAccount, ChannelMediaOptions options) |
abstract int | setCloudProxy (int proxyType) |
abstract int | getUserInfoByUserAccount (String userAccount, UserInfo userInfo) |
abstract int | getUserInfoByUid (int uid, UserInfo userInfo) |
abstract int | enableWebSdkInteroperability (boolean enabled) |
abstract int | getConnectionState () |
abstract int | enableRemoteSuperResolution (int uid, boolean enable) |
abstract int | enableRemoteSuperResolution (boolean enable, int mode, int uid) |
abstract int | enableAudio () |
abstract int | disableAudio () |
abstract int | pauseAudio () |
abstract int | resumeAudio () |
abstract int | setAudioProfile (int profile, int scenario) |
abstract int | setHighQualityAudioParameters (boolean fullband, boolean stereo, boolean fullBitrate) |
abstract int | adjustRecordingSignalVolume (int volume) |
abstract int | adjustPlaybackSignalVolume (int volume) |
abstract int | enableAudioVolumeIndication (int interval, int smooth, boolean report_vad) |
abstract int | enableLocalVoicePitchCallback (int interval) |
abstract int | enableAudioQualityIndication (boolean enabled) |
abstract int | enableLocalAudio (boolean enabled) |
abstract int | muteLocalAudioStream (boolean muted) |
abstract int | muteRemoteAudioStream (int uid, boolean muted) |
abstract int | adjustUserPlaybackSignalVolume (int uid, int volume) |
abstract int | muteAllRemoteAudioStreams (boolean muted) |
abstract int | setDefaultMuteAllRemoteAudioStreams (boolean muted) |
abstract int | enableVideo () |
abstract int | disableVideo () |
abstract int | setVideoProfile (int profile, boolean swapWidthAndHeight) |
abstract int | setVideoProfile (int width, int height, int frameRate, int bitrate) |
abstract int | setVideoEncoderConfiguration (VideoEncoderConfiguration config) |
abstract int | setCameraCapturerConfiguration (CameraCapturerConfiguration config) |
abstract int | setupLocalVideo (VideoCanvas local) |
abstract int | setupRemoteVideo (VideoCanvas remote) |
abstract int | setLocalRenderMode (int renderMode) |
abstract int | setLocalRenderMode (int renderMode, int mirrorMode) |
abstract int | setRemoteRenderMode (int uid, int renderMode) |
abstract int | setRemoteRenderMode (int uid, int renderMode, int mirrorMode) |
abstract int | startPreview () |
abstract int | stopPreview () |
abstract int | enableLocalVideo (boolean enabled) |
abstract int | muteLocalVideoStream (boolean muted) |
abstract int | muteRemoteVideoStream (int uid, boolean muted) |
abstract int | muteAllRemoteVideoStreams (boolean muted) |
abstract int | setDefaultMuteAllRemoteVideoStreams (boolean muted) |
abstract int | setBeautyEffectOptions (boolean enabled, BeautyOptions options) |
abstract int | setLowlightEnhanceOptions (boolean enabled, LowLightEnhanceOptions options) |
abstract int | setVideoDenoiserOptions (boolean enabled, VideoDenoiserOptions options) |
abstract int | setColorEnhanceOptions (boolean enabled, ColorEnhanceOptions options) |
abstract int | enableVirtualBackground (boolean enabled, VirtualBackgroundSource backgroundSource) |
abstract int | setDefaultAudioRoutetoSpeakerphone (boolean defaultToSpeaker) |
abstract int | setEnableSpeakerphone (boolean enabled) |
abstract boolean | isSpeakerphoneEnabled () |
abstract int | enableInEarMonitoring (boolean enabled) |
abstract int | setInEarMonitoringVolume (int volume) |
abstract int | useExternalAudioDevice () |
abstract int | setLocalVoicePitch (double pitch) |
abstract int | setLocalVoiceEqualization (int bandFrequency, int bandGain) |
abstract int | setLocalVoiceReverb (int reverbKey, int value) |
abstract int | setLocalVoiceChanger (int voiceChanger) |
abstract int | setLocalVoiceReverbPreset (int preset) |
abstract int | setAudioEffectPreset (int preset) |
abstract int | setVoiceBeautifierPreset (int preset) |
abstract int | setVoiceConversionPreset (int preset) |
abstract int | setAudioEffectParameters (int preset, int param1, int param2) |
abstract int | setVoiceBeautifierParameters (int preset, int param1, int param2) |
abstract int | enableDeepLearningDenoise (boolean enabled) |
abstract int | enableSoundPositionIndication (boolean enabled) |
abstract int | setRemoteVoicePosition (int uid, double pan, double gain) |
abstract int | startAudioMixing (String filePath, boolean loopback, boolean replace, int cycle) |
abstract int | selectAudioTrack (int audioIndex) |
abstract int | getAudioTrackCount () |
abstract int | setAudioMixingDualMonoMode (int mode) |
abstract int | startAudioMixing (String filePath, boolean loopback, boolean replace, int cycle, int startPos) |
abstract int | setAudioMixingPlaybackSpeed (int speed) |
abstract int | stopAudioMixing () |
abstract int | pauseAudioMixing () |
abstract int | resumeAudioMixing () |
abstract int | adjustAudioMixingVolume (int volume) |
abstract int | adjustAudioMixingPlayoutVolume (int volume) |
abstract int | adjustAudioMixingPublishVolume (int volume) |
abstract int | getAudioMixingPlayoutVolume () |
abstract int | getAudioMixingPublishVolume () |
abstract int | getAudioMixingDuration () |
abstract int | getAudioMixingCurrentPosition () |
abstract int | setAudioMixingPosition (int pos) |
abstract int | setAudioMixingPitch (int pitch) |
abstract IAudioEffectManager | getAudioEffectManager () |
abstract int | getAudioFileInfo (String filePath) |
abstract int | startAudioRecording (String filePath, int quality) |
abstract int | startAudioRecording (String filePath, int sampleRate, int quality) |
abstract int | startAudioRecording (AudioRecordingConfiguration config) |
abstract int | stopAudioRecording () |
abstract int | startEchoTest () |
abstract int | startEchoTest (int intervalInSeconds) |
abstract int | startEchoTest (EchoTestConfiguration config) |
abstract int | stopEchoTest () |
abstract int | enableLastmileTest () |
abstract int | disableLastmileTest () |
abstract int | startLastmileProbeTest (LastmileProbeConfig config) |
abstract int | stopLastmileProbeTest () |
abstract int | setVideoSource (IVideoSource source) |
abstract int | setLocalVideoRenderer (IVideoSink render) |
abstract int | setRemoteVideoRenderer (int uid, IVideoSink render) |
abstract int | setExternalAudioSink (boolean enabled, int sampleRate, int channels) |
abstract int | pullPlaybackAudioFrame (byte[] data, int lengthInByte) |
abstract int | setExternalAudioSource (boolean enabled, int sampleRate, int channels) |
abstract int | pushExternalAudioFrame (byte[] data, long timestamp) |
abstract int | pushExternalAudioFrame (byte[] data, long timestamp, int sampleRate, int channels, int bytesPerSample, int sourcePos) |
abstract int | setExternalAudioSourceVolume (int sourcePos, int volume) |
abstract void | setExternalVideoSource (boolean enable, boolean useTexture, boolean pushMode) |
abstract boolean | pushExternalVideoFrame (AgoraVideoFrame frame) |
abstract boolean | isTextureEncodeSupported () |
abstract int | registerAudioFrameObserver (IAudioFrameObserver observer) |
abstract int | registerVideoEncodedFrameObserver (IVideoEncodedFrameObserver observer) |
abstract int | registerVideoFrameObserver (IVideoFrameObserver observer) |
abstract int | setRecordingAudioFrameParameters (int sampleRate, int channel, int mode, int samplesPerCall) |
abstract int | setPlaybackAudioFrameParameters (int sampleRate, int channel, int mode, int samplesPerCall) |
abstract int | setMixedAudioFrameParameters (int sampleRate, int samplesPerCall) |
abstract int | addVideoWatermark (AgoraImage watermark) |
abstract int | addVideoWatermark (String watermarkUrl, WatermarkOptions options) |
abstract int | clearVideoWatermarks () |
abstract int | setRemoteUserPriority (int uid, int userPriority) |
abstract int | setLocalPublishFallbackOption (int option) |
abstract int | setRemoteSubscribeFallbackOption (int option) |
abstract int | enableDualStreamMode (boolean enabled) |
abstract int | setRemoteVideoStreamType (int uid, int streamType) |
abstract int | setRemoteDefaultVideoStreamType (int streamType) |
abstract int | setEncryptionSecret (String secret) |
abstract int | setEncryptionMode (String encryptionMode) |
abstract int | enableEncryption (boolean enabled, EncryptionConfig config) |
abstract int | addPublishStreamUrl (String url, boolean transcodingEnabled) |
abstract int | removePublishStreamUrl (String url) |
abstract int | setLiveTranscoding (LiveTranscoding transcoding) |
abstract int | startRtmpStreamWithoutTranscoding (String url) |
abstract int | startRtmpStreamWithTranscoding (String url, LiveTranscoding transcoding) |
abstract int | updateRtmpTranscoding (LiveTranscoding transcoding) |
abstract int | stopRtmpStream (String url) |
abstract int | createDataStream (boolean reliable, boolean ordered) |
abstract int | createDataStream (DataStreamConfig config) |
abstract int | sendStreamMessage (int streamId, byte[] message) |
abstract int | setVideoQualityParameters (boolean preferFrameRateOverImageQuality) |
abstract int | setLocalVideoMirrorMode (int mode) |
abstract int | switchCamera () |
abstract boolean | isCameraZoomSupported () |
abstract boolean | isCameraTorchSupported () |
abstract boolean | isCameraFocusSupported () |
abstract boolean | isCameraExposurePositionSupported () |
abstract boolean | isCameraAutoFocusFaceModeSupported () |
abstract int | setCameraZoomFactor (float factor) |
abstract float | getCameraMaxZoomFactor () |
abstract int | setCameraFocusPositionInPreview (float positionX, float positionY) |
abstract int | setCameraExposurePosition (float positionXinView, float positionYinView) |
abstract int | enableFaceDetection (boolean enable) |
abstract int | setCameraTorchOn (boolean isOn) |
abstract int | setCameraAutoFocusFaceModeEnabled (boolean enabled) |
abstract String | getCallId () |
abstract int | rate (String callId, int rating, String description) |
abstract int | complain (String callId, String description) |
abstract int | setLogFile (String filePath) |
abstract int | setLogFilter (int filter) |
abstract int | setLogFileSize (int fileSizeInKBytes) |
abstract long | getNativeHandle () |
void | addHandler (IRtcEngineEventHandler handler) |
void | removeHandler (IRtcEngineEventHandler handler) |
abstract boolean | enableHighPerfWifiMode (boolean enable) |
abstract void | monitorHeadsetEvent (boolean monitor) |
abstract void | monitorBluetoothHeadsetEvent (boolean monitor) |
abstract void | setPreferHeadset (boolean enabled) |
abstract int | setParameters (String parameters) |
abstract String | getParameter (String parameter, String args) |
abstract int | registerMediaMetadataObserver (IMetadataObserver observer, int type) |
abstract int | startChannelMediaRelay (ChannelMediaRelayConfiguration channelMediaRelayConfiguration) |
abstract int | stopChannelMediaRelay () |
abstract int | updateChannelMediaRelay (ChannelMediaRelayConfiguration channelMediaRelayConfiguration) |
abstract int | pauseAllChannelMediaRelay () |
abstract int | resumeAllChannelMediaRelay () |
abstract RtcChannel | createRtcChannel (String channelId) |
abstract int | takeSnapshot (String channel, int uid, String filePath) |
abstract int | startScreenCapture (ScreenCaptureParameters screenCaptureParameters) |
abstract int | stopScreenCapture () |
abstract int | updateScreenCaptureParameters (boolean captureVideo, boolean captureAudio, ScreenCaptureParameters.VideoCaptureParameters videoCaptureParameters) |
Static Public Member Functions
static synchronized RtcEngine | create (Context context, String appId, IRtcEngineEventHandler handler) throws Exception |
static synchronized RtcEngine | create (RtcEngineConfig config) throws Exception |
static synchronized void | destroy () |
static SurfaceView | CreateRendererView (Context context) |
static TextureView | CreateTextureView (Context context) |
static int | getRecommendedEncoderType () |
static String | getSdkVersion () |
static String | getMediaEngineVersion () |
static String | getErrorDescription (int error) |
static void | setAgoraLibPath (String path) |
Static Protected Attributes
static String | externalLibPath = null |
RtcEngine is the main interface class of the Agora SDK. Call the methods of this class to use all the functionalities of the SDK. We recommend calling the RtcEngine API methods in the same thread instead of in multiple threads. In previous versions, this class was named AgoraAudio; it was renamed to RtcEngine from v1.0.
|
static |
Creates an RtcEngine instance.
Unless otherwise specified, all the methods provided by the RtcEngine class are executed asynchronously. Agora recommends calling these methods in the same thread.
Ensure that you create an RtcEngine instance before calling any other method.
You can create an RtcEngine instance either by calling this method or by calling create2. The difference between create2 and this method is that create2 enables you to specify the region for connection.
The Agora RTC Native SDK supports creating only one RtcEngine instance for an app for now.
context | The context of the Android Activity. |
appId | The App ID issued to you by Agora. See How to get the App ID. Only users in apps with the same App ID can join the same channel and communicate with each other. Use an App ID to create only one RtcEngine instance. To change your App ID, call destroy to destroy the current RtcEngine instance, and after destroy returns 0, call create to create an RtcEngine instance with the new App ID. |
handler | IRtcEngineEventHandler is an abstract class providing default implementation. The SDK uses this class to report to the app on SDK runtime events. |
Returns the RtcEngine instance, if the method call succeeds.
Exception | Fails to create an RtcEngine instance. |
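A minimal initialization sketch, assuming the io.agora.rtc package of the 3.x SDK and a placeholder App ID; adapt it to your project:

    import io.agora.rtc.IRtcEngineEventHandler;
    import io.agora.rtc.RtcEngine;

    public class EngineHolder {
        // "YOUR_APP_ID" is a placeholder; replace it with the App ID from Agora Console.
        private static final String APP_ID = "YOUR_APP_ID";
        private RtcEngine rtcEngine;

        private final IRtcEngineEventHandler handler = new IRtcEngineEventHandler() {
            @Override
            public void onJoinChannelSuccess(String channel, int uid, int elapsed) {
                // The local user has joined the channel.
            }
        };

        void init(android.content.Context context) {
            try {
                // Create the RtcEngine instance before calling any other method.
                rtcEngine = RtcEngine.create(context.getApplicationContext(), APP_ID, handler);
            } catch (Exception e) {
                throw new RuntimeException("RtcEngine creation failed: " + e.getMessage());
            }
        }

        void release() {
            // destroy is synchronous; call it in a sub-thread if the main thread must stay responsive.
            RtcEngine.destroy();
            rtcEngine = null;
        }
    }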
|
static |
Creates an RtcEngine instance.
Unless otherwise specified, all the methods provided by the RtcEngine class are executed asynchronously. Agora recommends calling these methods in the same thread.
Ensure that you create an RtcEngine instance before calling any other method.
You can create an RtcEngine instance either by calling this method or by calling create1. The difference between create1 and this method is that this method enables you to specify the region for connection.
The Agora RTC Native SDK supports creating only one RtcEngine instance for an app for now.
config | Configurations for the RtcEngine instance. For details, see RtcEngineConfig . |
Returns the RtcEngine instance, if the method call succeeds.
Exception | Fails to create an RtcEngine instance. |
|
static |
Destroys the RtcEngine instance and releases all resources used by the Agora SDK.
Use this method for apps in which users occasionally make voice or video calls. When users do not make calls, you can free up resources for other operations. Once you call destroy to destroy the created RtcEngine instance, you cannot use any method or callback in the SDK any more. If you want to use the real-time communication functions again, you must call create to create a new RtcEngine instance.
Because destroy is a synchronous method and the app cannot move on to another task until the execution completes, Agora suggests calling this method in a sub-thread to avoid congestion in the main thread. Besides, you cannot call destroy in any method or callback of the SDK. Otherwise, the SDK cannot release the resources occupied by the RtcEngine instance until the callbacks return results, which may result in a deadlock.
If you want to create a new RtcEngine instance after destroying the current one, ensure that you wait until the destroy method completes executing.
|
abstract |
Sets the channel profile of the Agora RtcEngine.
After initialization, the SDK uses the CHANNEL_PROFILE_COMMUNICATION channel profile by default. You can call setChannelProfile to set the channel profile. The Agora RtcEngine differentiates channel profiles and applies different optimization algorithms accordingly. For example, it prioritizes smoothness and low latency for a video call, and prioritizes video quality for interactive live video streaming.
profile | The channel profile of the Agora RtcEngine:
|
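A minimal sketch of setting the live-streaming profile right after creating the engine; Constants.CHANNEL_PROFILE_LIVE_BROADCASTING is the constant name used by the 3.x Java SDK, so verify it against your SDK version:

    import io.agora.rtc.Constants;
    import io.agora.rtc.RtcEngine;

    // Call setChannelProfile before joinChannel so the live-streaming
    // optimization algorithms are applied from the start.
    void configureForLiveStreaming(RtcEngine rtcEngine) {
        int result = rtcEngine.setChannelProfile(Constants.CHANNEL_PROFILE_LIVE_BROADCASTING);
        if (result != 0) {
            // A negative value indicates failure; see the error codes in this reference.
        }
    }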
|
abstract |
Sets the role of the user in interactive live streaming.
After calling setChannelProfile(CHANNEL_PROFILE_LIVE_BROADCASTING)
, the SDK sets the user role as audience by default. You can call setClientRole
to set the user role as host.
You can call this method either before or after joining a channel. If you call this method to switch the user role after joining a channel, the SDK automatically does the following:
Calls muteLocalAudioStream and muteLocalVideoStream to change the publishing state.
Triggers onClientRoleChanged or onClientRoleChangeFailed on the local client.
Triggers onUserJoined or onUserOffline (USER_OFFLINE_BECOME_AUDIENCE) on the remote client.
This method applies to the LIVE_BROADCASTING profile only (when the profile parameter in setChannelProfile is set as CHANNEL_PROFILE_LIVE_BROADCASTING).
role | The role of a user in interactive live streaming: CLIENT_ROLE_BROADCASTER(1) or CLIENT_ROLE_AUDIENCE(2). |
ERR_FAILED: A general error occurs (no specified reason).
ERR_INVALID_ARGUMENT: The parameter is invalid.
ERR_REFUSED: The request is rejected. In multichannel scenarios, if you have set any of the following in one channel, the SDK returns this error code when the user switches the user role to host in another channel: calling joinChannel with the options parameter and using the default settings publishLocalAudio = true or publishLocalVideo = true; calling setClientRole to set the user role as host; or calling muteLocalAudioStream(false) or muteLocalVideoStream(false).
ERR_NOT_INITIALIZED: The SDK is not initialized.
|
abstract |
Sets the role of a user in a live interactive streaming.
After calling setChannelProfile(CHANNEL_PROFILE_LIVE_BROADCASTING)
, the SDK sets the user role as audience by default. You can call setClientRole
to set the user role as host.
You can call this method either before or after joining a channel. If you call this method to switch the user role after joining a channel, the SDK automatically does the following:
muteLocalAudioStream
and muteLocalVideoStream
to change the publishing state.onClientRoleChanged
or onClientRoleChangeFailed
on the local client.onUserJoined
or onUserOffline
(USER_OFFLINE_BECOME_AUDIENCE) on the remote client.LIVE_BROADCASTING
profile only (when the profile
parameter in setChannelProfile
is set as CHANNEL_PROFILE_LIVE_BROADCASTING).
The difference between this method and setClientRole1 is that this method can also set the user level in addition to the user role.
Sample code: see the sketch after this method's return values.
role | The role of a user in a live interactive streaming:
|
options | The detailed options of a user, including user level. See ClientRoleOptions . |
ERR_FAILED
): A general error occurs (no specified reason).ERR_INVALID_ARGUMENT
): The parameter is invalid.ERR_REFUSED
): The request is rejected. In multichannel scenarios, if you have set any of the following in one channel, the SDK returns this error code when the user switches the user role to host in another channel:joinChannel
with the options parameter and use the default settings publishLocalAudio = true
or publishLocalVideo = true
.setClientRole
to set the user role as host.muteLocalAudioStream(false)
or muteLocalVideoStream(false)
.ERR_NOT_INITIALIZED
): The SDK is not initialized.
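Sample code: a minimal sketch of switching to the audience role with a low-latency level. The ClientRoleOptions class and the Constants values shown here follow the 3.x Java SDK naming; treat them as assumptions and verify against your SDK version.

    import io.agora.rtc.Constants;
    import io.agora.rtc.RtcEngine;
    import io.agora.rtc.models.ClientRoleOptions;

    void becomeLowLatencyAudience(RtcEngine rtcEngine) {
        ClientRoleOptions options = new ClientRoleOptions();
        // Select the low-latency audience level.
        options.audienceLatencyLevel = Constants.AUDIENCE_LATENCY_LEVEL_LOW_LATENCY;
        int result = rtcEngine.setClientRole(Constants.CLIENT_ROLE_AUDIENCE, options);
        if (result != 0) {
            // Handle errors such as ERR_REFUSED or ERR_NOT_INITIALIZED.
        }
    }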
|
abstract |
Agora supports reporting and analyzing customized messages.
This function is in the beta stage with a free trial. In the beta release, you can report a maximum of 10 messages every 6 seconds, with each message no larger than 256 bytes and each string no longer than 100 bytes. To try out this function, contact support@agora.io and discuss the format of customized messages with us.
|
abstract |
Allows a user to join a channel.
Users in the same channel can talk to each other, and multiple users in the same channel can start a group chat. Users with different App IDs cannot call each other.
You must call the leaveChannel
method to exit the current call before joining another channel.
A successful method call of joinChannel
triggers the following callbacks:
onJoinChannelSuccess
.onUserJoined
, if the user joining the channel is in the COMMUNICATION
profile, or is a host in the LIVE_BROADCASTING
profile.When the connection between the client and Agora server is interrupted due to poor network conditions, the SDK tries reconnecting to the server. When the local client successfully rejoins the channel, the SDK triggers the onRejoinChannelSuccess
callback on the local client.
Once the user joins the channel (switches to another channel), the user subscribes to the audio and video streams of all the other users in the channel by default, giving rise to usage and billing calculation. If you do not want to subscribe to a specified stream or all remote streams, call the mute
methods accordingly.
token | The token generated at your server. For details, see Authenticate Your Users with Tokens. |
channelName | The unique channel name for the AgoraRTC session in the string format. The string length must be less than 64 bytes. Supported character scopes are:
|
optionalInfo | Reserved for future use. |
optionalUid | (Optional) User ID. A 32-bit unsigned integer with a value ranging from 1 to (2^32-1). optionalUid must be unique. If optionalUid is not assigned (or set to 0), the SDK assigns and returns uid in the onJoinChannelSuccess callback. Your app must record and maintain the returned uid since the SDK does not do so. The uid is represented as a 32-bit unsigned integer in the SDK. Since unsigned integers are not supported by Java, the uid is handled as a 32-bit signed integer and larger numbers are interpreted as negative numbers in Java. If necessary, the uid can be converted to a 64-bit integer through “uid&0xffffffffL”. |
RtcChannel
object with the same channel name.RtcChannel
object. When you join a channel created by the RtcEngine
object, the SDK publishes the local audio and video streams to that channel by default. Because the SDK does not support publishing a local stream to more than one channel simultaneously, an error occurs in this case.RtcEngine
channel at a time. Therefore, the SDK returns this error code when a user who has already joined an RtcEngine
channel calls the joining channel method of the RtcEngine
class with a valid channel name.
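A minimal sketch of joining a channel with a token and letting the SDK assign the uid; the channel name and token are placeholders:

    // "demoChannel" and the token string are placeholders for illustration only.
    void joinDemoChannel(io.agora.rtc.RtcEngine rtcEngine, String token) {
        // Passing 0 as optionalUid lets the SDK assign a uid and report it
        // in the onJoinChannelSuccess callback.
        int result = rtcEngine.joinChannel(token, "demoChannel", "", 0);
        if (result != 0) {
            // result is a negative error code, for example -5 (ERR_REFUSED).
        }
    }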
|
abstract |
Joins a channel with the user ID, and configures whether to publish or automatically subscribe to the audio or video streams.
Users in the same channel can talk to each other, and multiple users in the same channel can start a group chat. Users with different App IDs cannot call each other.
You must call the leaveChannel
method to exit the current call before entering another channel.
A successful joinChannel
method call triggers the following callbacks:
onJoinChannelSuccess
onUserJoined
, if the user joining the channel is in the COMMUNICATION
profile, or is a host in the LIVE_BROADCASTING
profile.When the connection between the client and the Agora server is interrupted due to poor network conditions, the SDK tries reconnecting to the server. When the local client successfully rejoins the channel, the SDK triggers the onRejoinChannelSuccess
callback on the local client.
joinChannel
[1/2], this method has the options
parameter, which configures whether the user publish or automatically subscribes to the audio and video streams in the channel when joining the channel. By default, the user publishes the local audio and video streams and automatically subscribes to the audio and video streams of all the other users in the channel. Subscribing incurs all associated usage costs. To unsubscribe, set the options
parameter or call the mute
methods accordingly.create
method for creating an RtcEngine
object.token | The token generated at your server. For details, see Authenticate Your Users with Tokens. |
channelName | The unique channel name for the AgoraRTC session in the string format. The string length must be less than 64 bytes. Supported character scopes are:
|
optionalInfo | Reserved for future use. |
optionalUid | (Optional) User ID. A 32-bit unsigned integer with a value ranging from 1 to (2^32-1). optionalUid must be unique. If optionalUid is not assigned (or set to 0), the SDK assigns and returns uid in the onJoinChannelSuccess callback. Your app must record and maintain the returned uid, because the SDK does not do so. The uid is represented as a 32-bit unsigned integer in the SDK. Since unsigned integers are not supported by Java, the uid is handled as a 32-bit signed integer and larger numbers are interpreted as negative numbers in Java. If necessary, the uid can be converted to a 64-bit integer through “uid&0xffffffffL”.The ID of each user in the channel should be unique. If you want to join the same channel from different devices, ensure that the user IDs in all devices are different. |
options | The channel media options: ChannelMediaOptions . |
RtcChannel
object with the same channel name.RtcChannel
object.RtcEngine
channel at a time. Therefore, the SDK returns this error code when a user who has already joined an RtcEngine
channel calls the joining channel method of the RtcEngine
class with a valid channel name.
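A sketch of joining without automatically subscribing to remote streams; the ChannelMediaOptions field names (autoSubscribeAudio, autoSubscribeVideo) follow the 3.x Java SDK and should be verified against your version:

    import io.agora.rtc.RtcEngine;
    import io.agora.rtc.models.ChannelMediaOptions;

    void joinWithoutAutoSubscribe(RtcEngine rtcEngine, String token, String channelName) {
        ChannelMediaOptions options = new ChannelMediaOptions();
        // Do not subscribe to remote audio or video automatically; call the
        // mute methods later to subscribe on demand.
        options.autoSubscribeAudio = false;
        options.autoSubscribeVideo = false;
        rtcEngine.joinChannel(token, channelName, "", 0, options);
    }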
|
abstract |
Switches to a different channel.
This method allows the audience of a LIVE_BROADCASTING
channel to switch to a different channel.
After the user successfully switches to another channel, the onLeaveChannel
and onJoinChannelSuccess
callbacks are triggered to indicate that the user has left the original channel and joined a new one.
Once the user joins the channel (switches to another channel), the user subscribes to the audio and video streams of all the other users in the channel by default, giving rise to usage and billing calculation. If you do not want to subscribe to a specified stream or all remote streams, call the mute
methods accordingly.
LIVE_BROADCASTING
channel only.token | The token generated at your server. For details, see Authenticate Your Users with Tokens. |
channelName | Unique channel name for the AgoraRTC session in the string format. The string length must be less than 64 bytes. Supported character scopes are:
|
|
abstract |
Switches to a different channel, and configures whether to automatically subscribe to audio or video streams in the target channel.
This method allows the audience of a LIVE_BROADCASTING
channel to switch to a different channel.
After the user successfully switches to another channel, the onLeaveChannel
and onJoinChannelSuccess
callbacks are triggered to indicate that the user has left the original channel and joined a new one.
LIVE_BROADCASTING
channel only.switchChannel
[1/2] is that the former adds the options
parameter to configure whether the end user automatically subscribes to all remote audio and video streams in the target channel. By default, the user subscribes to the audio and video streams of all the other users in the target channel, thus incurring all associated usage costs. To unsubscribe, set the options
parameter or call the mute
methods accordingly.token | The token generated at your server. For details, see Authenticate Your Users with Tokens. |
channelName | Unique channel name for the AgoraRTC session in the string format. The string length must be less than 64 bytes. Supported character scopes are:
|
options | The channel media options: ChannelMediaOptions . |
|
abstract |
Allows a user to leave a channel.
After joining a channel, the user must call the leaveChannel
method to end the call before joining another channel. This method returns 0 if the user leaves the channel and releases all resources related to the call. This method call is asynchronous, and the user has not exited the channel when the method call returns. Once the user leaves the channel, the SDK triggers the onLeaveChannel
callback.
A successful method call of leaveChannel
triggers the following callbacks:
onLeaveChannel
.onUserOffline
, if the user leaving the channel is in the Communication channel, or is a host in the LIVE_BROADCASTING
profile.destroy
method immediately after calling the leaveChannel
method, the leaveChannel process interrupts, and the SDK does not trigger the onLeaveChannel
callback.leaveChannel
method during CDN live streaming, the SDK triggers the removePublishStreamUrl
method.
|
abstract |
Renews the token when the current token expires.
The token expires after a period of time once the token schema is enabled when:
onTokenPrivilegeWillExpire
callback, oronConnectionStateChanged
callback reports the CONNECTION_CHANGED_TOKEN_EXPIRED(9)
error.The app should retrieve a new token from the server and call this method to renew it. Failure to do so results in the SDK disconnecting from the server.
token | The new token. |
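A minimal sketch of renewing the token from the onTokenPrivilegeWillExpire callback; fetchTokenFromServer is a hypothetical helper standing in for your own token-server request:

    import io.agora.rtc.IRtcEngineEventHandler;
    import io.agora.rtc.RtcEngine;

    class TokenHandler extends IRtcEngineEventHandler {
        private final RtcEngine rtcEngine;

        TokenHandler(RtcEngine rtcEngine) {
            this.rtcEngine = rtcEngine;
        }

        @Override
        public void onTokenPrivilegeWillExpire(String token) {
            // fetchTokenFromServer() is a placeholder for your own request to the
            // server that generates Agora tokens.
            String newToken = fetchTokenFromServer();
            rtcEngine.renewToken(newToken);
        }

        private String fetchTokenFromServer() {
            return "NEW_TOKEN"; // placeholder
        }
    }

Register the handler with addHandler, or pass it in when calling create.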
|
abstract |
Registers a user account.
Once registered, the user account can be used to identify the local user when the user joins the channel.
After the user successfully registers a user account, the SDK triggers the onLocalUserRegistered
callback on the local client, reporting the user ID and user account of the local user.
To join a channel with a user account, you can choose either of the following:
registerLocalUserAccount
method to create a user account, and then the joinChannelWithUserAccount
method to join the channel.joinChannelWithUserAccount
method to join the channel.The difference between the two is that for the former, the time elapsed between calling the joinChannelWithUserAccount method and joining the channel is shorter than the latter.
userAccount
parameter. Otherwise, this method does not take effect.userAccount
parameter is unique in the channel.appId | The App ID of your project. |
userAccount | The user account. The maximum length of this parameter is 255 bytes. Ensure that you set this parameter and do not set it as null. Supported character scopes are:
|
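A minimal sketch of the recommended flow: register the user account first, then join with it; the App ID, token, channel name, and account are placeholders:

    void joinWithAccount(io.agora.rtc.RtcEngine rtcEngine, String appId, String token) {
        // Register the account once, ideally right after initializing RtcEngine,
        // so that joining the channel later is faster.
        rtcEngine.registerLocalUserAccount(appId, "alice-001");

        // Later, join the channel with the same user account.
        rtcEngine.joinChannelWithUserAccount(token, "demoChannel", "alice-001");
    }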
|
abstract |
Joins the channel with a user account.
After the user successfully joins the channel, the SDK triggers the following callbacks:
onLocalUserRegistered
and onJoinChannelSuccess
.onUserJoined
and onUserInfoUpdated
, if the user joining the channel is in the COMMUNICATION
profile, or is a host in the LIVE_BROADCASTING
profile.Once the user joins the channel (switches to another channel), the user subscribes to the audio and video streams of all the other users in the channel by default, giving rise to usage and billing calculation. If you do not want to subscribe to a specified stream or all remote streams, call the mute
methods accordingly.
token | The token generated at your server. For details, see Authenticate Your Users with Tokens. |
channelName | The channel name. The maximum length of this parameter is 64 bytes. Supported character scopes are:
|
userAccount | The user account. The maximum length of this parameter is 255 bytes. Ensure that the user account is unique and do not set it as null.
|
ERR_INVALID_ARGUMENT(-2)
ERR_NOT_READY(-3)
ERR_REFUSED(-5)
ERR_JOIN_CHANNEL_REJECTED(-17)
: The request to join the channel is rejected. The SDK supports joining only one RtcEngine
channel at a time. Therefore, the SDK returns this error code when a user who has already joined an RtcEngine
channel calls the joining channel method of the RtcEngine
class with a valid channel name.
|
abstract |
Joins the channel with a user account, and configures whether to publish or automatically subscribe to audio or video streams after joining the channel.
After the user successfully joins the channel, the SDK triggers the following callbacks:
onLocalUserRegistered
and onJoinChannelSuccess
.onUserJoined
and onUserInfoUpdated
, if the user joining the channel is in the COMMUNICATION
profile, or is a host in the LIVE_BROADCASTING
profile.joinChannelWithUserAccount
[1/2], this method has the options
parameter, which configures whether the user publishes or automatically subscribes to the audio and video streams in the channel when joining the channel. By default, the user publishes the local audio and video streams and automatically subscribes to the audio and video streams of all the other users in the channel. Subscribing incurs all associated usage costs. To unsubscribe, set the options
parameter or call the mute methods accordingly.token | The token generated at your server. For details, see Authenticate Your Users with Tokens. |
channelName | The channel name. The maximum length of this parameter is 64 bytes. Supported character scopes are:
|
userAccount | The user account. The maximum length of this parameter is 255 bytes. Ensure that you set this parameter and do not set it as null.
|
options | The channel media options: ChannelMediaOptions . |
ERR_INVALID_ARGUMENT(-2)
ERR_NOT_READY(-3)
ERR_REFUSED(-5)
ERR_JOIN_CHANNEL_REJECTED(-17)
: The request to join the channel is rejected. The SDK supports joining only one RtcEngine
channel at a time. Therefore, the SDK returns this error code when a user who has already joined an RtcEngine
channel calls the joining channel method of the RtcEngine
class with a valid channel name.
|
abstract |
Sets the Agora cloud proxy service.
When users' network access is restricted by a firewall, configure the firewall to allow specific IP addresses and ports provided by Agora; then, call this method to enable the cloud proxy and set the cloud proxy type with the proxyType parameter.
After a successful cloud proxy connection, the SDK triggers the onConnectionStateChanged
(CONNECTION_STATE_CONNECTING, CONNECTION_CHANGED_SETTING_PROXY_SERVER)
callback.
As of v3.6.2, when a user calls this method and then joins a channel successfully, the SDK triggers the onProxyConnected
callback to report the user ID, the proxy type connected, and the time elapsed from the user calling joinChannel
until this callback is triggered.
To disable the cloud proxy that has been set, call setCloudProxy(TRANSPORT_TYPE_NONE_PROXY)
. To change the cloud proxy type that has been set, call setCloudProxy(TRANSPORT_TYPE_NONE_PROXY)
first, and then call setCloudProxy
with the desired proxyType
.
startAudioMixing
to play online music files in the HTTP protocol.proxyType | The cloud proxy type. This parameter is required, and the SDK reports an error if you do not pass in a value. The cloud proxy types include:
|
-2(ERR_INVALID_ARGUMENT)
: The parameter is invalid.-7(ERR_NOT_INITIALIZED)
: The SDK is not initialized.
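A minimal sketch of enabling the UDP cloud proxy before joining a channel; the constant names Constants.TRANSPORT_TYPE_UDP_PROXY and Constants.TRANSPORT_TYPE_NONE_PROXY are assumed from the 3.x Java SDK and should be verified:

    import io.agora.rtc.Constants;
    import io.agora.rtc.RtcEngine;

    void enableCloudProxy(RtcEngine rtcEngine) {
        // Enable the UDP cloud proxy; call this before joining the channel.
        int result = rtcEngine.setCloudProxy(Constants.TRANSPORT_TYPE_UDP_PROXY);
        if (result == 0) {
            // To disable the proxy later:
            // rtcEngine.setCloudProxy(Constants.TRANSPORT_TYPE_NONE_PROXY);
        }
    }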
|
abstract |
Gets the user information by passing in the user account.
After a remote user joins the channel, the SDK gets the user ID and user account of the remote user, caches them in a mapping table object (UserInfo
), and triggers the onUserInfoUpdated
callback on the local client.
After receiving the onUserInfoUpdated
callback, you can call this method to get the user ID of the remote user from the userInfo object by passing in the user account.
userAccount | The user account of the user. Ensure that you set this parameter. |
userInfo | [in/out] A userInfo object that identifies the user. For details, see UserInfo .
|
|
abstract |
Gets the user information by passing in the user ID.
After a remote user joins the channel, the SDK gets the user ID and user account of the remote user, caches them in a mapping table object (UserInfo
), and triggers the onUserInfoUpdated
callback on the local client.
After receiving the onUserInfoUpdated
callback, you can call this method to get the user ID of the remote user from the userInfo object by passing in the user account.
uid | The user ID of the user. Ensure that you set this parameter. |
userInfo | [in/out] A userInfo object that identifies the user. For details, see UserInfo .
|
|
abstract |
Enables interoperability with the Agora Web SDK (LIVE_BROADCASTING
profile only).
If the channel has Web SDK users, ensure that you call this method; otherwise, Web users see a black screen instead of the native users' video.
Use this method when the channel profile is LIVE_BROADCASTING
. Interoperability with the Agora Web SDK is enabled by default when the channel profile is Communication.
enabled | Whether to enable/disable interoperability with the Agora Web SDK:
|
|
abstract |
Gets the connection state of the SDK.
CONNECTION_STATE_DISCONNECTED(1)
: The SDK is disconnected from Agora edge server.CONNECTION_STATE_CONNECTING(2)
: The SDK is connecting to Agora edge server.CONNECTION_STATE_CONNECTED(3)
: The SDK joined a channel and is connected to Agora edge server. You can now publish or subscribe to a media stream in the channel.CONNECTION_STATE_RECONNECTING(4)
: The SDK keeps rejoining the channel after being disconnected from a joined channel because of network issues.CONNECTION_STATE_FAILED(5)
: The SDK fails to join the channel.
|
abstract |
Enables/Disables the super-resolution algorithm for a remote user's video stream.
enableRemoteSuperResolution
[2/2] instead.This feature effectively boosts the resolution of a remote user's video seen by the local user. If the original resolution of a remote user's video is a × b, the local user's device can render the remote video at a resolution of 2a × 2b after you enable this feature.
After calling this method, the SDK triggers the onUserSuperResolutionEnabled
callback to report whether you have successfully enabled super resolution.
libagora_super_resolution_extension.so
dynamic library into your project.uid | The user ID of the remote user. |
enable | Determines whether to enable super resolution for the remote user's video:
|
ERR_MODULE_NOT_FOUND
): The dynamic library for super resolution is not integrated.
|
abstract |
Enables/Disables the super-resolution algorithm for a remote user's video stream. This is a beta feature.
This feature effectively boosts the resolution of a remote user's video seen by the local user. If the original resolution of a remote user's video is a × b, the local user's device can render the remote video at a resolution of 2a × 2b after you enable this feature.
After calling this method, the SDK triggers the onUserSuperResolutionEnabled
callback to report whether you have successfully enabled super resolution.
libagora_super_resolution_extension.so
dynamic libraries into your project.enable | Determines whether to enable super resolution for the remote user's video:
|
mode | The mode of super resolution:
|
uid | The user ID of the remote user. This parameter only applies when mode is set as SR_MODE_MANUAL(0) . |
ERR_MODULE_NOT_FOUND
): The dynamic library for super resolution is not integrated.
|
abstract |
Enables the audio module.
The audio module is enabled by default.
leaveChannel
method. You can call this method either before or after joining a channel.enableLocalAudio
: Whether to enable the microphone to create the local audio stream.muteLocalAudioStream
: Whether to publish the local audio stream.muteRemoteAudioStream
: Whether to subscribe to and play the remote audio stream.muteAllRemoteAudioStreams
: Whether to subscribe to and play all remote audio streams.
|
abstract |
Disables the audio module.
leaveChannel
method. You can call this method either before or after joining a channel.enableLocalAudio
: Whether to enable the microphone to create the local audio stream.muteLocalAudioStream
: Whether to publish the local audio stream.muteRemoteAudioStream
: Whether to subscribe to and play the remote audio stream.muteAllRemoteAudioStreams
: Whether to subscribe to and play all remote audio streams.
|
abstract |
Disables the audio function in the channel.
enableAudio
method instead.
|
abstract |
Resumes the audio playback in the channel.
disableAudio
method instead.
|
abstract |
Sets the audio parameters and application scenarios.
joinChannel
method.COMMUNICATION
and LIVE_BROADCASTING
profiles, the bitrate may be different from your settings due to network self-adaptation.profile
as MUSIC_HIGH_QUALITY (4)
and scenario
as GAME_STREAMING (3)
. For example, for music education scenarios.profile | Sets the sample rate, bitrate, encoding mode, and the number of channels. See Audio Profile . |
scenario | Sets the audio application scenario. See AudioScenario . Under different audio scenarios, the device uses different volume types. For details, see What is the difference between the in-call volume and the media volume?. |
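A minimal sketch of a music-teaching configuration, as suggested above; the Constants values mirror the profile and scenario names and should be checked against your SDK version:

    import io.agora.rtc.Constants;
    import io.agora.rtc.RtcEngine;

    void configureForMusicTeaching(RtcEngine rtcEngine) {
        // High-quality music profile with the game-streaming scenario,
        // as recommended for music education scenarios.
        rtcEngine.setAudioProfile(
                Constants.AUDIO_PROFILE_MUSIC_HIGH_QUALITY,
                Constants.AUDIO_SCENARIO_GAME_STREAMING);
    }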
|
abstract |
Sets the high-quality audio preferences.
setAudioProfile
method. Call this method and set all parameters before joining a channel. Do not call this method again after joining a channel.fullband | Whether to enable full-band codec (48-kHz sample rate), not compatible with versions earlier than v1.7.4:
|
stereo | Whether to enable stereo codec, not compatible with versions earlier than v1.7.4:
|
fullBitrate | Whether to enable high-bitrate mode. Recommended in voice-only mode:
|
|
abstract |
Adjusts the volume of the signal captured by the microphone.
volume | The volume of the signal captured by the microphone. The value ranges between 0 and 400, including the following:
|
|
abstract |
Adjusts the playback signal volume of all remote users.
adjustPlaybackSignalVolume
and adjustAudioMixingVolume
, and set volume
as 0.volume | The playback volume. The value ranges between 0 and 400, including the following:
|
|
abstract |
Enables the reporting of users' volume indication. This method enables the SDK to regularly report the volume information of the local user who sends a stream and remote users (up to three) whose instantaneous volumes are the highest to the app. Once you call this method and users send streams in the channel, the SDK triggers the onAudioVolumeIndication
callback at the time interval set in this method.
interval | Sets the time interval between two consecutive volume indications:
|
smooth | The smoothing factor sets the sensitivity of the audio volume indicator. The value ranges between 0 and 10. The greater the value, the more sensitive the indicator. The recommended value is 3. |
report_vad |
|
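A minimal sketch that enables the volume indication every 500 ms and reads it in the event handler; the interval, the smoothing factor 3, and report_vad = true follow the recommendations above:

    import io.agora.rtc.IRtcEngineEventHandler;
    import io.agora.rtc.RtcEngine;

    class VolumeMonitor extends IRtcEngineEventHandler {
        void enable(RtcEngine rtcEngine) {
            // Report volumes every 500 ms with the recommended smoothing factor 3,
            // and include the local voice activity detection result (report_vad).
            rtcEngine.enableAudioVolumeIndication(500, 3, true);
        }

        @Override
        public void onAudioVolumeIndication(AudioVolumeInfo[] speakers, int totalVolume) {
            // speakers holds the uid and volume of the loudest users; uid 0 is the local user.
        }
    }

Register the VolumeMonitor with addHandler, or pass it in when calling create.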
|
abstract |
Enables reporting the voice pitch of the local user.
This method enables the SDK to regularly report the voice pitch of the local user. After the local audio capture is enabled, and you call this method, the SDK triggers the onLocalVoicePitchInHz
callback at the time interval set in this method.
interval | Sets the time interval at which the SDK triggers the onLocalVoicePitchInHz callback:
|
|
abstract |
Enables the audio quality callbacks.
The SDK triggers the onAudioQuality
callback after this method is enabled.
enabled | Whether to enable/disable the audio quality callback:
|
|
abstract |
Enables/Disables the local audio capture.
The audio function is enabled by default. This method disables/re-enables the local audio function, that is, to stop or restart local audio capture and processing.
This method does not affect receiving the remote audio streams, and enableLocalAudio(false)
is applicable to scenarios where the user wants to receive remote audio streams without sending any audio stream to other users in the channel.
Once the local audio function is disabled or re-enabled, the SDK triggers the onLocalAudioStateChanged
callback, which reports LOCAL_AUDIO_STREAM_STATE_STOPPED(0)
or LOCAL_AUDIO_STREAM_STATE_CAPTURING(1)
.
muteLocalAudioStream
method:enableLocalAudio
: Disables/Re-enables the local audio capture and processing. If you disable or re-enable local audio sampling using the enableLocalAudio
method, the local user may hear a pause in the remote audio playback.muteLocalAudioStream
: Stops/Continues sending the local audio streams.enabled | Whether to disable/re-enable the local audio function:
|
|
abstract |
Stops or resumes publishing the local audio stream.
As of v3.4.5, this method only sets the publishing state of the audio stream in the channel of RtcEngine
.
A successful method call triggers the onUserMuteAudio
callback on the remote client.
You can only publish the local stream in one channel at a time. If you create multiple channels, ensure that you only call muteLocalAudioStream(false)
in one channel; otherwise, the method call fails, and the SDK returns -5 (ERR_REFUSED)
.
joinChannel
[2/2] and setClientRole
methods. For details, see Set the Publishing State.muted | Sets whether to stop publishing the local audio stream.
|
-5 (ERR_REFUSED)
: The request is rejected.
|
abstract |
Stops or resumes subscribing to the audio stream of a specified user.
uid | The user ID of the specified remote user. |
muted | Sets whether to stop subscribing to the audio stream of a specified user.
|
|
abstract |
Adjusts the playback signal volume of a specified remote user.
You can call this method as many times as necessary to adjust the playback volume of different remote users, or to repeatedly adjust the playback volume of the same remote user.
uid | ID of the remote user. |
volume | The playback volume of the specified remote user. The value ranges between 0 and 100, including the following:
|
|
abstract |
Stops or resumes subscribing to the audio streams of all remote users.
After successfully calling this method, the local user stops or resumes subscribing to the audio streams of all remote users, including all subsequent users.
setDefaultMuteAllRemoteAudioStreams
. Agora recommends not calling muteAllRemoteAudioStreams
and setDefaultMuteAllRemoteAudioStreams
together; otherwise, the settings may not take effect. See Set the Subscribing State.muted | Sets whether to stop subscribing to the audio streams of all remote users.
|
|
abstract |
Sets whether to receive all remote audio streams by default.
Stops or resumes subscribing to the audio streams of all remote users by default.
Call this method after joining a channel. After successfully calling this method, the local user stops or resumes subscribing to the audio streams of all subsequent users.
setDefaultMuteAllRemoteAudioStreams(true)
, do the following:muteRemoteAudioStream(false)
, and specify the user ID.muteRemoteAudioStream(false)
multiple times.muted | Sets whether to stop subscribing to the audio streams of all remote users by default.
|
|
abstract |
Enables the video module.
You can call this method either before joining a channel or during a call. If you call this method before joining a channel, the service starts in the video mode. If you call this method during an audio call, the audio mode switches to the video mode.
A successful enableVideo method call triggers the onUserEnableVideo
(true) callback on the remote client.
To disable the video, call the disableVideo
method.
leaveChannel
method. You can call this method either before or after joining a channel.enableLocalVideo
: Whether to enable the camera to create the local video stream.muteLocalVideoStream
: Whether to publish the local video stream.muteRemoteVideoStream
: Whether to subscribe to and play the remote video stream.muteAllRemoteVideoStreams
: Whether to subscribe to and play all remote video streams.
|
abstract |
Disables the video module.
You can call this method before joining a channel or during a call. If you call this method before joining a channel, the service starts in audio mode. If you call this method during a video call, the video mode switches to the audio mode.
A successful disableVideo method call triggers the onUserEnableVideo
(false) callback on the remote client.
To enable the video mode, call the enableVideo
method.
leaveChannel
method. You can call this method either before or after joining a channel. enableLocalVideo
: Whether to enable the camera to create the local video stream. muteLocalVideoStream
: Whether to publish the local video stream. muteRemoteVideoStream
: Whether to subscribe to and play the remote video stream. muteAllRemoteVideoStreams
: Whether to subscribe to and play all remote video streams.
|
abstract |
Sets the video encoding profile.
setVideoEncoderConfiguration
method to set the video profile.You can call this method either before or after joining a channel.
If you do not need to change the video encoding profile after joining a channel, call this method before calling the enableVideo
method to reduce the render time of the first video frame.
Each video encoding profile includes a set of parameters, such as the resolution, frame rate, and bitrate. When the camera does not support the specified resolution, the SDK chooses a suitable camera resolution, while the encoder resolution is specified by this method.
profile | Sets the video encoding profile. See VideoProfile . |
swapWidthAndHeight | The width and height of the output video is consistent with the set video profile. swapWidthAndHeight sets whether to swap the width and height of the stream:
|
|
abstract |
Manually sets the video encoding profile.
setVideoEncoderConfiguration
method to set the video profile. Each video encoding profile includes a set of parameters, such as the resolution, frame rate, and bitrate. If the camera device does not support the specified resolution, the SDK automatically chooses a suitable camera resolution, keeping the encoder resolution specified by this method. If you do not need to change the video encoding profile after joining a channel, we recommend calling this method before calling the enableVideo
method to minimize the time delay in receiving the first video frame.
width | Sets the width of the video. The maximum value of width × height is 1280 × 720. |
height | Sets the height of the video. The maximum value of width × height is 1280 × 720. |
frameRate | Sets the frame rate of the video. The highest value is 30. You can set frameRate as 5, 10, 15, 24, and 30. |
bitrate | Sets the bitrate of the video. You need to manually work out the bitrate according to the width, height, and frame rate. For the correlation between the width, height, and frame rate. See the table in Bitrate . |
With the same width and height, the bitrate varies with the frame rate:
If you set a bitrate beyond the proper range, the SDK automatically adjusts the bitrate to a value within the proper range.
|
abstract |
Sets the video encoder configuration.
setVideoProfile
method.Each video encoder configuration corresponds to a set of video parameters, including the resolution, frame rate, bitrate, and video orientation. The parameters specified in this method are the maximum values under ideal network conditions. If the video engine cannot render the video using the specified parameters due to poor network conditions, the parameters further down the list are considered until a successful configuration is found.
If you do not set the video encoder configuration after joining the channel, you can call this method before calling the enableVideo
method to reduce the render time of the first video frame.
config | The local video encoder configuration. See VideoEncoderConfiguration . |
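A minimal sketch of a 640×360, 15 fps configuration with adaptive orientation; the enum names come from the 3.x VideoEncoderConfiguration class and should be verified against your SDK version:

    import io.agora.rtc.RtcEngine;
    import io.agora.rtc.video.VideoEncoderConfiguration;

    void configureEncoder(RtcEngine rtcEngine) {
        VideoEncoderConfiguration config = new VideoEncoderConfiguration(
                VideoEncoderConfiguration.VD_640x360,
                VideoEncoderConfiguration.FRAME_RATE.FRAME_RATE_FPS_15,
                VideoEncoderConfiguration.STANDARD_BITRATE,
                VideoEncoderConfiguration.ORIENTATION_MODE.ORIENTATION_MODE_ADAPTIVE);
        rtcEngine.setVideoEncoderConfiguration(config);
    }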
|
abstract |
Sets the camera capturer configuration.
For a video call or live streaming, generally the SDK controls the camera output parameters. When the default camera capture settings do not meet special requirements or cause performance problems, we recommend using this method to set the camera capturer configuration:
setVideoEncoderConfiguration
, processing video frames requires extra CPU and RAM usage and degrades performance. We recommend setting config
as CAPTURER_OUTPUT_PREFERENCE_PERFORMANCE(1)
to avoid such problems. config
as CAPTURER_OUTPUT_PREFERENCE_PERFORMANCE(1)
to optimize CPU and RAM usage. config
as CAPTURER_OUTPUT_PREFERENCE_PREVIEW(2)
. CAPTURER_OUTPUT_PREFERENCE_MANUAL(3)
. config | The camera capturer configuration. For details, see CameraCapturerConfiguration . |
joinChannel
, enableVideo
, or enableLocalVideo
, depending on which method you use to turn on your local camera.
|
abstract |
Initializes the local video view.
This method initializes the video view of the local stream on the local device. It affects only the video view that the local user sees, not the published local video stream.
Call this method to bind the local video stream to a video view and to set the rendering and mirror modes of the video view.
setLocalRenderMode
.local | Sets the local video view and settings. See VideoCanvas . |
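A minimal sketch that creates a SurfaceView with CreateRendererView, adds it to a layout, and binds the local stream; the FrameLayout container is an assumption about your UI:

    import android.content.Context;
    import android.view.SurfaceView;
    import android.widget.FrameLayout;

    import io.agora.rtc.RtcEngine;
    import io.agora.rtc.video.VideoCanvas;

    void bindLocalVideo(Context context, RtcEngine rtcEngine, FrameLayout container) {
        SurfaceView localView = RtcEngine.CreateRendererView(context);
        container.addView(localView);
        // uid 0 tells the SDK that this canvas renders the local user.
        rtcEngine.setupLocalVideo(new VideoCanvas(localView, VideoCanvas.RENDER_MODE_HIDDEN, 0));
        rtcEngine.startPreview();
    }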
|
abstract |
Initializes the video view of a remote user.
This method initializes the video view of a remote stream on the local device. It affects only the video view that the local user sees.
Call this method to bind the remote video stream to a video view and to set the rendering and mirror modes of the video view.
Typically, the app specifies the uid
of the remote user sending the video in the method call before the remote user joins a channel. If the uid
of the remote user is unknown to the app, set the uid
when the app receives the onUserJoined
callback.
If the Video Recording function is enabled, the Video Recording Service joins the channel as a dummy client, causing other clients to also receive the onUserJoined
callback. Do not bind the dummy client to the app view because the dummy client does not send any video streams. If your app does not recognize the dummy client, bind the remote user to the view when the SDK triggers the onFirstRemoteVideoDecoded
callback.
To unbind the remote user from the view, set view
in Video Canvas
as null
. Once the remote user leaves the channel, the SDK unbinds the remote user.
setRemoteRenderMode
.remote | Sets the remote video view and settings. See Video Canvas . |
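A sketch of binding a remote user's stream when onUserJoined fires; the Activity, RtcEngine, and FrameLayout references are assumptions about your app structure:

    import android.app.Activity;
    import android.view.SurfaceView;
    import android.widget.FrameLayout;

    import io.agora.rtc.IRtcEngineEventHandler;
    import io.agora.rtc.RtcEngine;
    import io.agora.rtc.video.VideoCanvas;

    class RemoteVideoBinder extends IRtcEngineEventHandler {
        private final Activity activity;
        private final RtcEngine rtcEngine;
        private final FrameLayout remoteContainer; // a layout in your UI (assumption)

        RemoteVideoBinder(Activity activity, RtcEngine rtcEngine, FrameLayout remoteContainer) {
            this.activity = activity;
            this.rtcEngine = rtcEngine;
            this.remoteContainer = remoteContainer;
        }

        @Override
        public void onUserJoined(final int uid, int elapsed) {
            // Callbacks arrive on an SDK thread; touch views on the UI thread.
            activity.runOnUiThread(() -> {
                SurfaceView remoteView = RtcEngine.CreateRendererView(activity);
                remoteContainer.addView(remoteView);
                rtcEngine.setupRemoteVideo(
                        new VideoCanvas(remoteView, VideoCanvas.RENDER_MODE_HIDDEN, uid));
            });
        }
    }

Register the RemoteVideoBinder with addHandler so it receives the callback.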
|
abstract |
Sets the local video display mode.
Deprecated. Use setLocalRenderMode2 instead.
renderMode | Sets the local video display mode:
|
|
abstract |
Updates the display mode of the local video view.
After initializing the local video view, you can call this method to update its rendering and mirror modes. It affects only the video view that the local user sees, not the published local video stream.
setupLocalVideo
method to initialize the local video view before calling this method.renderMode | Sets the local video display mode:
|
mirrorMode | Sets the local video mirror mode:
|
|
abstract |
Sets the remote video display mode.
Deprecated. Use setRemoteRenderMode2 instead.
This method can be invoked multiple times during a call to change the display mode.
uid | User ID of the remote user. |
renderMode | Sets the remote video display mode:
|
|
abstract |
Updates the display mode of the video view of a remote user.
After initializing the video view of a remote user, you can call this method to update its rendering and mirror modes. This method affects only the video view that the local user sees.
Ensure that you call the setupRemoteVideo method to initialize the remote video view before calling this method.
uid | User ID of the remote user. |
renderMode | Sets the remote video display mode:
|
mirrorMode | Sets the remote video mirror mode:
|
|
static |
Creates a RendererView.
This method returns the SurfaceView type. The operation and layout of the view are managed by the app, and the Agora SDK renders the view provided by the app. The video display view must be created using this method instead of directly calling SurfaceView
.
To use a SurfaceView, call this method; to use a TextureView, call CreateTextureView
.
context | The context of the Android Activity. |
|
static |
Creates a TextureView.
You can call this method to create a TextureView, which is suitable for scenarios that require scaling, rotation, and parallel coordinate translation of video images, such as screen sharing. The operation and layout of the view are managed by the app, and the Agora SDK renders the view provided by the app.
To use a TextureView, call this method; to use a SurfaceView, call CreateRendererView
.
context | The context of the Android Activity. |
|
abstract |
Starts the local video preview before joining a channel.
Before calling this method, you must:
setupLocalVideo
method to set the local preview window and configure the attributes. enableVideo
method to enable the video. leaveChannel
method, the local video preview remains until you call the stopPreview
method to disable it.
|
abstract |
Stops the local video preview.
After calling startPreview
, if you want to stop the local video preview, call stopPreview
.
|
abstract |
Disables/Re-enables the local video capture.
This method disables or re-enables the local video capturer, and does not affect receiving the remote video stream.
After you call the enableVideo
method, the local video capturer is enabled by default. You can call enableLocalVideo(false) to disable the local video capturer. If you want to re-enable it, call enableLocalVideo(true).
After the local video capturer is successfully disabled or re-enabled, the SDK triggers the onUserEnableLocalVideo
callback on the remote client.
This method affects the internal engine and can be called after calling the leaveChannel method.
enabled | Whether to disable/re-enable the local video, including the capturer, renderer, and sender:
|
|
abstract |
Stops or resumes publishing the local video stream.
As of v3.4.5, this method only sets the publishing state of the video stream in the channel of RtcEngine
.
A successful method call triggers the onUserMuteVideo
callback on the remote client.
You can only publish the local stream in one channel at a time. If you create multiple channels, ensure that you only call muteLocalVideoStream(false)
in one channel; otherwise, the method call fails, and the SDK returns -5 (ERR_REFUSED)
.
Whether this method call takes effect is affected by the joinChannel [2/2] and setClientRole methods. For details, see Set the Publishing State.
muted | Sets whether to stop publishing the local video stream.
|
-5 (ERR_REFUSED): The request is rejected.
|
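For illustration, a minimal sketch of toggling publishing, assuming an initialized RtcEngine named engine; the -5 check mirrors the ERR_REFUSED case described above.

```java
import io.agora.rtc.RtcEngine;

// Sketch: toggle publishing of the local video stream; remote clients receive onUserMuteVideo.
void setVideoPublishing(RtcEngine engine, boolean publish) {
    int result = engine.muteLocalVideoStream(!publish);
    if (result == -5) {
        // ERR_REFUSED: for example, muteLocalVideoStream(false) was already called in another channel.
    }
}
```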
abstract |
Stops or resumes subscribing to the video stream of a specified user.
uid | The user ID of the specified remote user. |
muted | Sets whether to stop subscribing to the video stream of a specified user.
|
|
abstract |
Stops or resumes subscribing to the video streams of all remote users.
After successfully calling this method, the local user stops or resumes subscribing to the video streams of all remote users, including all subsequent users.
As of v3.3.0, this method contains the function of setDefaultMuteAllRemoteVideoStreams. Agora recommends not calling muteAllRemoteVideoStreams and setDefaultMuteAllRemoteVideoStreams together; otherwise, the settings may not take effect. See Set the Subscribing State.
muted | Sets whether to stop subscribing to the video streams of all remote users.
|
|
abstract |
Sets whether to receive all remote video streams by default.
Stops or resumes subscribing to the video streams of all remote users by default.
Call this method after joining a channel. After successfully calling this method, the local user stops or resumes subscribing to the video streams of all subsequent users.
If you need to resume subscribing to the video streams of remote users in the channel after calling setDefaultMuteAllRemoteVideoStreams(true), do the following:
To resume subscribing to the video stream of a specified user, call muteRemoteVideoStream(false) and specify the user ID.
To resume subscribing to the video streams of multiple remote users, call muteRemoteVideoStream(false) multiple times.
muted | Sets whether to stop subscribing to the video streams of all remote users by default.
|
|
abstract |
Enables/Disables image enhancement and sets the options.
Call this method after calling enableVideo.
Ensure that you have integrated the libagora_video_process_extension.so dynamic library into the project before calling this method.
enabled | Whether to enable image enhancement:
|
options | The image enhancement options. See BeautyOptions . |
|
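For illustration, a minimal sketch of enabling image enhancement with moderate values, assuming an initialized RtcEngine named engine; the BeautyOptions field and constant names follow the 3.x SDK and should be treated as assumptions.

```java
import io.agora.rtc.RtcEngine;
import io.agora.rtc.video.BeautyOptions;

// Sketch: enable image enhancement with moderate values (field names assumed from BeautyOptions).
void enableBasicBeauty(RtcEngine engine) {
    BeautyOptions options = new BeautyOptions();
    options.lighteningContrastLevel = BeautyOptions.LIGHTENING_CONTRAST_NORMAL;
    options.lighteningLevel = 0.6f;  // brightening strength, 0.0 to 1.0
    options.smoothnessLevel = 0.5f;  // skin smoothing, 0.0 to 1.0
    options.rednessLevel = 0.1f;     // redness, 0.0 to 1.0
    engine.setBeautyEffectOptions(true, options);
}
```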
abstract |
Sets low-light enhancement.
The low-light enhancement feature can adaptively adjust the brightness value of the video captured in situations with low or uneven lighting, such as backlit, cloudy, or dark scenes. It restores or highlights the image details and improves the overall visual effect of the video.
You can call this method to enable the low-light enhancement feature and set the options of the low-light enhancement effect.
Ensure that you have integrated the libagora_video_process_extension.so dynamic library.
Call this method after calling enableVideo.
enabled | Sets whether to enable low-light enhancement:
|
options | The low-light enhancement options. See LowLightEnhanceOptions . |
|
abstract |
Sets video noise reduction.
Underlit environments and low-end video capture devices can cause video images to contain significant noise, which affects video quality. In real-time interactive scenarios, video noise also consumes bitstream resources and reduces encoding efficiency during encoding.
You can call this method to enable the video noise reduction feature and set the options of the video noise reduction effect.
Ensure that you have integrated the libagora_video_process_extension.so dynamic library.
Call this method after calling enableVideo.
enabled | Sets whether to enable video noise reduction:
|
options | The video noise reduction options. See VideoDenoiserOptions . |
|
abstract |
Sets color enhancement.
The video images captured by the camera can have color distortion. The color enhancement feature intelligently adjusts video characteristics such as saturation and contrast to enhance the video color richness and color reproduction, making the video more vivid.
You can call this method to enable the color enhancement feature and set the options of the color enhancement effect.
Ensure that you have integrated the libagora_video_process_extension.so dynamic library.
Call this method after calling enableVideo.
enabled | Sets whether to enable color enhancement:
|
options | The color enhancement options. See ColorEnhanceOptions . |
|
abstract |
Enables/Disables the virtual background.
After enabling the virtual background feature, you can replace the original background image of the local user with a custom background image. After the replacement, all users in the channel can see the custom background image. You can find out from the onVirtualBackgroundSourceEnabled
callback whether the virtual background is successfully enabled or the cause of any errors.
Ensure that you have integrated the libagora_segmentation_extension.so dynamic library into the project folder.
Call this method after calling enableVideo.
enabled | Sets whether to enable the virtual background:
|
backgroundSource | The custom background image. See VirtualBackgroundSource . Note: To adapt the resolution of the custom background image to the resolution of the SDK capturing video, the SDK scales and crops the custom background image while ensuring that the content of the custom background image is not distorted. |
|
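For illustration, a minimal sketch of enabling a solid-color virtual background, assuming an initialized RtcEngine named engine; the VirtualBackgroundSource field and constant names follow the 3.5+ SDK and should be treated as assumptions.

```java
import io.agora.rtc.RtcEngine;
import io.agora.rtc.video.VirtualBackgroundSource;

// Sketch: replace the camera background with a solid color.
void enableColorBackground(RtcEngine engine) {
    VirtualBackgroundSource source = new VirtualBackgroundSource();
    source.backgroundSourceType = VirtualBackgroundSource.BACKGROUND_COLOR;  // assumed constant name
    source.color = 0x00A0E9;  // RGB value of the replacement background
    engine.enableVirtualBackground(true, source);
    // Check onVirtualBackgroundSourceEnabled to confirm success or read the error reason.
}
```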
abstract |
Sets the default audio route.
If the default audio route of the SDK (see Set the Audio Route) cannot meet your requirements, you can call this method to switch the default audio route. After successfully switching the audio route, the SDK triggers the onAudioRouteChanged
callback to indicate the changes.
Call this method before calling joinChannel. If you need to switch the audio route after joining a channel, call setEnableSpeakerphone.
defaultToSpeaker | Sets the default audio route as follows:
|
|
abstract |
Enables/Disables the audio route to the speakerphone.
If the default audio route of the SDK (see Set the Audio Route) or the setting in setDefaultAudioRoutetoSpeakerphone
cannot meet your requirements, you can call this method to switch the current audio route. After successfully switching the audio route, the SDK triggers the onAudioRouteChanged
callback to indicate the changes.
This method only sets the audio route in the current channel and does not influence the default audio route. If the user leaves the current channel and joins another channel, the default audio route is used.
Call this method after calling joinChannel.
enabled | Sets whether to enable the speakerphone or earpiece:
|
|
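A minimal sketch, assuming an initialized RtcEngine named engine, of switching the route after joining and then verifying it.

```java
import io.agora.rtc.RtcEngine;

// Sketch: route audio to the speakerphone after joining a channel, then verify the active route.
void routeToSpeaker(RtcEngine engine) {
    engine.setEnableSpeakerphone(true);
    boolean onSpeaker = engine.isSpeakerphoneEnabled();  // true if audio now plays from the speakerphone
}
```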
abstract |
Checks whether the speakerphone is enabled.
true: The speakerphone is enabled, and the audio plays from the speakerphone.
false: The speakerphone is not enabled, and the audio plays from devices other than the speakerphone, such as a headset or earpiece.
|
abstract |
Enables in-ear monitoring.
enabled | Sets whether to enable/disable in-ear monitoring:
|
|
abstract |
Sets the volume of the in-ear monitor.
volume | Sets the volume of the in-ear monitor. The value ranges between 0 and 100 (default). |
|
abstract |
Uses an external audio device.
|
abstract |
Changes the voice pitch of the local speaker.
pitch | Sets the voice pitch. The value ranges between 0.5 and 2.0. The lower the value, the lower the voice pitch. The default value is 1.0 (no change to the local voice pitch). |
|
abstract |
Sets the local voice equalization effect.
bandFrequency | Sets the band frequency. The value ranges between 0 and 9, representing the respective 10-band center frequencies of the voice effects, including 31, 62, 125, 250, 500, 1k, 2k, 4k, 8k, and 16k Hz. |
bandGain | Sets the gain of each band (dB). The value ranges between -15 and 15. The default value is 0. |
|
abstract |
Sets the local voice reverberation.
As of v3.2.0, the SDK provides a more convenient method, setAudioEffectPreset, which directly implements popular music, R&B, KTV, and other preset reverb effects.
reverbKey | The reverberation key. This method contains five reverberation keys. For details, see the description of each value. |
value | The local voice reverberation value:
|
|
abstract |
Sets the local voice changer option.
Deprecated. As of v3.2.0, use the following methods instead: setAudioEffectPreset (audio effects), setVoiceBeautifierPreset (voice beautifier effects), and setVoiceConversionPreset (voice conversion effects).
You can call this method either before or after joining a channel.
This method can be used to set the local voice effect for users in a communication channel or hosts in a LIVE_BROADCASTING
channel. Voice changer options include the following voice effects:
VOICE_CHANGER_XXX: Changes the local voice to an old man, a little boy, or the Hulk. Applies to the voice talk scenario.
VOICE_BEAUTY_XXX: Beautifies the local voice by making it sound more vigorous, resounding, or adding spatial resonance. Applies to the voice talk and singing scenarios.
GENERAL_VOICE_BEAUTY_XXX: Adds a gender-based beautification effect to the local voice. Applies to the voice talk scenario.
To achieve better audio effect quality, Agora recommends setting the profile parameter in setAudioProfile as AUDIO_PROFILE_MUSIC_HIGH_QUALITY or AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO before calling this method.
Do not use this method together with setLocalVoiceReverbPreset, because the method called later overrides the one called earlier. For detailed considerations, see the advanced guide Set the Voice Effect.
voiceChanger | The local voice changer option:
|
|
abstract |
Sets the local voice reverberation option, including the virtual stereo.
You can call this method either before or after joining a channel.
Deprecated. As of v3.2.0, use setAudioEffectPreset or setVoiceBeautifierPreset instead.
This method sets the local voice reverberation for users in a communication channel or hosts in a LIVE_BROADCASTING channel. After successfully calling this method, all users in the channel can hear the voice with reverberation.
When using an enumeration whose name begins with AUDIO_REVERB_FX, ensure that you set profile in setAudioProfile as AUDIO_PROFILE_MUSIC_HIGH_QUALITY or AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO; otherwise, this method cannot set the corresponding voice reverb.
When using AUDIO_VIRTUAL_STEREO, Agora recommends setting the profile parameter in setAudioProfile as AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO.
Do not use this method together with setLocalVoiceChanger, because the method called later overrides the one called earlier. For detailed considerations, see the advanced guide Set the Voice Effect.
preset | The local voice reverberation option. To achieve better voice effects, Agora recommends the enumeration whose name begins with AUDIO_REVERB_FX.
|
|
abstract |
Sets an SDK preset audio effect.
Call this method to set an SDK preset audio effect for the local user who sends an audio stream. This audio effect does not change the gender characteristics of the original voice. After setting an audio effect, all users in the channel can hear the effect.
You can set different audio effects for different scenarios. See Set the Voice Effect.
To achieve better audio effect quality, Agora recommends calling setAudioProfile
and setting the scenario
parameter to AUDIO_SCENARIO_GAME_STREAMING(3)
before calling this method.
Do not set the profile parameter of setAudioProfile to AUDIO_PROFILE_SPEECH_STANDARD(1); otherwise, this method call does not take effect.
If you call this method and set the preset parameter to an enumerator other than ROOM_ACOUSTICS_3D_VOICE or PITCH_CORRECTION, do not call setAudioEffectParameters; otherwise, setAudioEffectParameters overrides this method.
After calling this method, Agora recommends not calling the following methods, because they can override setAudioEffectPreset:
|
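For example, a minimal sketch of the recommended call order, assuming an initialized RtcEngine named engine; ROOM_ACOUSTICS_KTV is used only as one example of the preset constants.

```java
import io.agora.rtc.Constants;
import io.agora.rtc.RtcEngine;

// Sketch: set the recommended profile/scenario first, then apply an SDK preset audio effect.
void applyKtvEffect(RtcEngine engine) {
    engine.setAudioProfile(Constants.AUDIO_PROFILE_MUSIC_HIGH_QUALITY,
                           Constants.AUDIO_SCENARIO_GAME_STREAMING);
    engine.setAudioEffectPreset(Constants.ROOM_ACOUSTICS_KTV);
}
```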
abstract |
Sets an SDK preset voice beautifier effect.
Call this method to set an SDK preset voice beautifier effect for the local user who sends an audio stream. After setting a voice beautifier effect, all users in the channel can hear the effect.
You can set different voice beautifier effects for different scenarios. See Set the Voice Effect.
To achieve better audio effect quality, Agora recommends calling setAudioProfile
and setting the scenario
parameter to AUDIO_SCENARIO_GAME_STREAMING(3)
and the profile
parameter to AUDIO_PROFILE_MUSIC_HIGH_QUALITY(4)
or AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO(5)
before calling this method.
Do not set the profile parameter of setAudioProfile to AUDIO_PROFILE_SPEECH_STANDARD(1); otherwise, this method call does not take effect.
After calling this method, Agora recommends not calling the following methods, because they can override setVoiceBeautifierPreset:
|
abstract |
Sets an SDK preset voice conversion effect.
Call this method to set an SDK preset voice conversion effect for the local user who sends an audio stream. After setting a voice conversion effect, all users in the channel can hear the effect.
You can set different voice conversion effects for different scenarios. See Set the Voice Effect.
To achieve better audio effect quality, Agora recommends calling setAudioProfile
and setting the scenario
parameter to AUDIO_SCENARIO_GAME_STREAMING(3)
and the profile
parameter to AUDIO_PROFILE_MUSIC_HIGH_QUALITY(4)
or AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO(5)
before calling this method.
Do not set the profile parameter of setAudioProfile to AUDIO_PROFILE_SPEECH_STANDARD(1); otherwise, this method call does not take effect.
After calling this method, Agora recommends not calling the following methods, because they can override setVoiceConversionPreset:
preset | The options for SDK preset voice conversion effects: |
|
abstract |
Sets parameters for SDK preset audio effects.
Call this method to set the following parameters for the local user who sends an audio stream:
After setting parameters, all users in the channel can hear the relevant effect.
To achieve better audio effect quality, Agora recommends calling setAudioProfile and setting the scenario parameter to AUDIO_SCENARIO_GAME_STREAMING(3) before calling this method.
Do not set the profile parameter of setAudioProfile to AUDIO_PROFILE_SPEECH_STANDARD(1); otherwise, this method call does not take effect.
After calling this method, Agora recommends not calling the following methods, because they can override setAudioEffectParameters:
preset | The options for SDK preset audio effects:
|
param1 |
|
param2 |
|
|
abstract |
Sets parameters for SDK preset voice beautifier effects.
Call this method to set a gender characteristic and a reverberation effect for the singing beautifier effect. This method sets parameters for the local user who sends an audio stream.
After you call this method successfully, all users in the channel can hear the relevant effect.
To achieve better audio effect quality, before you call this method, Agora recommends calling setAudioProfile
, and setting the scenario parameter to AUDIO_SCENARIO_GAME_STREAMING(3)
and the profile parameter to AUDIO_PROFILE_MUSIC_HIGH_QUALITY(4)
or AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO(5)
.
Do not set the profile parameter of setAudioProfile to AUDIO_PROFILE_SPEECH_STANDARD(1); otherwise, this method call does not take effect.
After calling this method, Agora recommends not calling the following methods, because they can override setVoiceBeautifierParameters:
preset | The options for SDK preset voice beautifier effects:
|
param1 | The gender characteristics options for the singing voice:
|
param2 | The reverberation effects options:
|
|
abstract |
Enables or disables noise suppression.
The SDK enables traditional noise reduction mode by default to reduce most of the stationary background noise. If you need to reduce most of the non-stationary background noise, Agora recommends enabling noise suppression as follows:
Ensure that the libagora_ai_denoise_extension.so dynamic library is integrated in your project.
Call enableDeepLearningDenoise(true).
Noise suppression requires high-performance devices. After successfully enabling noise suppression, if the SDK detects that the device performance is not sufficient, it automatically disables noise suppression and enables traditional noise reduction.
If you call enableDeepLearningDenoise(false), or if the SDK automatically disables noise suppression in the channel, you need to call leaveChannel first and then call enableDeepLearningDenoise(true) to re-enable noise suppression.
This method dynamically loads libagora_ai_denoise_extension.so, so Agora recommends calling this method before joining a channel.
enabled | Sets whether to enable noise suppression.
|
-157 (ERR_MODULE_NOT_FOUND): The library for enabling noise suppression is not integrated.
|
abstract |
Enables/Disables stereo panning for remote users.
Ensure that you call this method before joinChannel
to enable stereo panning for remote users so that the local user can track the position of a remote user by calling setRemoteVoicePosition
.
enabled | Whether to enable stereo panning for remote users:
|
|
abstract |
Sets the sound position of a remote user.
When the local user calls this method to set the sound position of a remote user, the sound difference between the left and right channels allows the local user to track the real-time position of the remote user, creating a real sense of space. This method applies to massively multiplayer online games, such as Battle Royale games.
Ensure that you have called the enableSoundPositionIndication method before joining a channel.
uid | The ID of the remote user. |
pan | The sound position of the remote user. The value ranges from -1.0 to 1.0:
|
gain | Gain of the remote user. The value ranges from 0.0 to 100.0. The default value is 100.0 (the original gain of the remote user). The smaller the value, the less the gain. |
|
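A minimal sketch, assuming an initialized RtcEngine named engine and a known remote uid, of enabling stereo panning and then positioning a remote voice.

```java
import io.agora.rtc.RtcEngine;

// Sketch: enable stereo panning before joining, then place a remote user slightly to the left.
void placeRemoteVoice(RtcEngine engine, int remoteUid) {
    engine.enableSoundPositionIndication(true);             // call before joinChannel
    // ... after joining, once remoteUid is known:
    engine.setRemoteVoicePosition(remoteUid, -0.5, 100.0);  // pan -0.5 = left of center, original gain
}
```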
abstract |
Starts playing and mixing the music file.
Deprecated. Use startAudioMixing [2/2] instead.
This method mixes the specified local or online audio file with the audio stream from the microphone, or replaces the microphone's audio stream with the specified local or remote audio file. You can choose whether the other user can hear the local audio playback and specify the number of playback loops. When the audio mixing file playback finishes after calling this method, the SDK triggers the onAudioMixingFinished
callback.
A successful startAudioMixing
method call triggers the onAudioMixingStateChanged
(MEDIA_ENGINE_AUDIO_EVENT_MIXING_PLAY) callback on the local client.
When the audio mixing file playback finishes, the SDK triggers the onAudioMixingStateChanged
(MEDIA_ENGINE_AUDIO_EVENT_MIXING_STOPPED) callback on the local client.
If you call this method frequently, the MEDIA_ENGINE_AUDIO_ERROR_MIXING_TOO_FREQUENT = 702 warning occurs.
If the SDK cannot open the audio mixing file, it returns WARN_AUDIO_MIXING_OPEN_ERROR = 701.
To access an online audio file whose URL begins with http, you need to add android:usesCleartextTraffic="true" to the /app/Manifests/AndroidManifest.xml file.
filePath | The file path, including the filename extensions. To access an online file, Agora supports using a URL address; to access a local file, Agora supports using a URI address, an absolute path, or a path that starts with /assets/. Note: You might encounter permission issues if you use an absolute path to access a local file, so Agora recommends using a URI address instead. For example: "content://com.android.providers.media.documents/document/audio%3A14441". |
loopback | Sets which user can hear the audio mixing:
|
replace | Sets the audio mixing content:
|
cycle | Sets the number of playback loops:
|
WARN_AUDIO_MIXING_OPEN_ERROR (701): If the local audio file does not exist, or the online audio packet is not received within five seconds after it is opened, the SDK assumes that the media file cannot be used and returns this warning.
|
abstract |
Specifies the playback track of the current music file.
After getting the number of audio tracks of the current music file, call this method to specify any audio track to play. For example, if different tracks of a multitrack file store songs in different languages, you can call this method to set the language of the music file to play.
Call this method after calling startAudioMixing [2/2] and receiving the onAudioMixingStateChanged (MEDIA_ENGINE_AUDIO_EVENT_MIXING_PLAY) callback.
audioIndex | The specified playback track. The value range is [0, getAudioTrackCount() ). |
|
abstract |
Gets the number of audio tracks of the current music file.
Call this method after calling startAudioMixing [2/2] and receiving the onAudioMixingStateChanged (MEDIA_ENGINE_AUDIO_EVENT_MIXING_PLAY) callback.
|
abstract |
Sets the channel mode of the current music file.
In a stereo music file, the left and right channels can store different audio data. According to your needs, you can set the channel mode to original mode, left channel mode, right channel mode, or mixed channel mode. For example, in the KTV scenario, the left channel of the music file stores the musical accompaniment, and the right channel stores the singing voice. If you only need to listen to the accompaniment, call this method to set the channel mode of the music file to left channel mode; if you need to listen to the accompaniment and the singing voice at the same time, call this method to set the channel mode to mixed channel mode.
Call this method after calling startAudioMixing [2/2] and receiving the onAudioMixingStateChanged (MEDIA_ENGINE_AUDIO_EVENT_MIXING_PLAY) callback.
mode | The channel mode. See AudioMixingDualMonoMode . |
|
abstract |
Starts playing and mixing the music file.
This method supports mixing or replacing local or online music file and audio collected by a microphone. After successfully playing the music file, the SDK triggers onAudioMixingStateChanged
(AUDIO_MIXING_STATE_PLAYING, AUDIO_MIXING_REASON_STARTED_BY_USER)
. After completing playing the music file, the SDK triggers onAudioMixingStateChanged
(AUDIO_MIXING_STATE_STOPPED, AUDIO_MIXING_REASON_ALL_LOOPS_COMPLETED)
.
/sdcard/
directory and the format is MP3.
If you need to call startAudioMixing multiple times, ensure that the call interval is longer than 500 ms.
If the SDK cannot open the audio file, it returns WARN_AUDIO_MIXING_OPEN_ERROR(701).
To access an online audio file whose URL begins with http, you need to add android:usesCleartextTraffic="true" to the /app/Manifests/AndroidManifest.xml file.
filePath | The file path, including the filename extensions. To access an online file, Agora supports using a URL address; to access a local file, Agora supports using a URI address, an absolute path, or a path that starts with /assets/. Note: You might encounter permission issues if you use an absolute path to access a local file, so Agora recommends using a URI address instead. For example: "content://com.android.providers.media.documents/document/audio%3A14441". |
loopback | Whether to only play the music file on the local client:
|
replace | Whether to replace the audio collected by the microphone with a music file:
|
cycle | The number of times the music file plays.
|
startPos | The playback position (ms) of the music file. |
|
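For illustration, a minimal sketch of mixing a bundled music file, assuming an initialized RtcEngine named engine; the asset path is hypothetical.

```java
import io.agora.rtc.RtcEngine;

// Sketch: mix a bundled asset with the microphone signal, audible to everyone, played once from the start.
void startBackingTrack(RtcEngine engine) {
    engine.startAudioMixing(
            "/assets/backing_track.mp3",  // hypothetical asset path
            false,  // loopback: false = remote users also hear the mixed file
            false,  // replace: false = mix with, rather than replace, the microphone input
            1,      // cycle: play the file once
            0       // startPos: start from the beginning (0 ms)
    );
}
```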
abstract |
Sets the playback speed of the current music file.
Call this method after calling startAudioMixing
[2/2] and receiving the onAudioMixingStateChanged
(MEDIA_ENGINE_AUDIO_EVENT_MIXING_PLAY)
callback.
speed | The playback speed. Agora recommends that you limit this value to between 50 and 400, defined as follows:
|
|
abstract |
Stops playing or mixing the music file.
Call this method when you are in a channel.
|
abstract |
Pauses playing and mixing the music file.
Call this method when you are in a channel.
|
abstract |
Resumes playing and mixing the music file.
Call this method when you are in a channel.
|
abstract |
Adjusts the volume of audio mixing.
Call this method after calling startAudioMixing
and receiving the onAudioMixingStateChanged
(MEDIA_ENGINE_AUDIO_EVENT_MIXING_PLAY)
callback.
Calling this method does not affect the volume of the audio effect file playback invoked by the playEffect method.
volume | Audio mixing volume. The value ranges between 0 and 100 (default). |
|
abstract |
Adjusts the volume of audio mixing for local playback.
Call this method after calling startAudioMixing
and receiving the onAudioMixingStateChanged
(MEDIA_ENGINE_AUDIO_EVENT_MIXING_PLAY)
callback.
volume | Audio mixing volume for local playback. The value ranges between 0 and 100 (default). |
|
abstract |
Adjusts the volume of audio mixing for publishing (sending to other users).
Call this method after calling startAudioMixing
and receiving the onAudioMixingStateChanged
(MEDIA_ENGINE_AUDIO_EVENT_MIXING_PLAY)
callback.
volume | Audio mixing volume for publishing. The value ranges between 0 and 100 (default). |
|
abstract |
Gets the audio mixing volume for local playback.
This method helps troubleshoot audio volume related issues.
Call this method after calling startAudioMixing
and receiving the onAudioMixingStateChanged
(MEDIA_ENGINE_AUDIO_EVENT_MIXING_PLAY)
callback.
|
abstract |
Gets the audio mixing volume for publishing.
This method helps troubleshoot audio volume related issues.
Call this method after calling startAudioMixing
and receiving the onAudioMixingStateChanged
(MEDIA_ENGINE_AUDIO_EVENT_MIXING_PLAY)
callback.
|
abstract |
Gets the duration (ms) of the music file which is played by startAudioMixing
.
Deprecated. Use getAudioFileInfo instead.
Call this method when you are in a channel.
Call this method after calling startAudioMixing
and receiving the onAudioMixingStateChanged
(MEDIA_ENGINE_AUDIO_EVENT_MIXING_PLAY)
callback.
|
abstract |
Gets the playback position (ms) of the music file.
Call this method after calling startAudioMixing
and receiving the onAudioMixingStateChanged
(MEDIA_ENGINE_AUDIO_EVENT_MIXING_PLAY)
callback.
If you need to call getAudioMixingCurrentPosition multiple times, ensure that the call interval is longer than 500 ms.
|
abstract |
Sets the playback position (ms) of the music file to a different starting position (the default plays from the beginning).
Call this method after calling startAudioMixing
and receiving the onAudioMixingStateChanged
(MEDIA_ENGINE_AUDIO_EVENT_MIXING_PLAY)
callback.
pos | The playback starting position (ms) of the audio mixing file. |
|
abstract |
Sets the pitch of the local music file.
When a local music file is mixed with a local human voice, call this method to set the pitch of the local music file only.
Call this method after calling startAudioMixing
and receiving the onAudioMixingStateChanged
(MEDIA_ENGINE_AUDIO_EVENT_MIXING_PLAY)
callback.
pitch | Sets the pitch of the local music file by chromatic scale. The default value is 0, which means keep the original pitch. The value ranges from -12 to 12, and the pitch value between consecutive values is a chromatic value. The greater the absolute value of this parameter, the higher or lower the pitch of the local music file. |
|
abstract |
Gets the IAudioEffectManager
object associated with the current RtcEngine instance.
IAudioEffectManager
|
abstract |
Gets the information of a specified audio file.
After calling this method successfully, the SDK triggers the onRequestAudioFileInfo
callback to report the information of an audio file, such as audio duration. You can call this method multiple times to get the information of multiple audio files.
filePath | The file path, including the filename extensions. To access an online file, Agora supports using a URL address; to access a local file, Agora supports using a URI address, an absolute path, or a path that starts with /assets/ . You might encounter permission issues if you use an absolute path to access a local file, so Agora recommends using a URI address instead. For example: content://com.android.providers.media.documents/document/audio%3A14441 . |
|
abstract |
Starts an audio recording on the client.
Deprecated. Use startAudioRecording [3/3] instead.
The SDK allows recording during a call. Supported formats of the recording file are as follows:
This method has a fixed sample rate of 32 kHz.
Ensure that you call this method after calling the joinChannel method. The recording automatically stops when you call the leaveChannel method.
filePath | Full file path of the recording file. The string of the file name is in UTF-8. For example, /sdcard/emulated/0/audio/aac . |
quality | The audio recording quality:
|
|
abstract |
Starts an audio recording on the client.
Deprecated. Use startAudioRecording [3/3] instead.
The SDK allows recording during a call. After successfully calling this method, you can record the audio of all the users in the channel and get an audio recording file.
Supported formats of the recording file are as follows:
Ensure that you call this method after calling the joinChannel method. The recording automatically stops when you call the leaveChannel method.
Set quality as AUDIO_RECORDING_QUALITY_MEDIUM or AUDIO_RECORDING_QUALITY_HIGH when sampleRate is 44.1 kHz or 48 kHz.
filePath | Absolute file path of the recording file. The string of the file name is in UTF-8. For example, /sdcard/emulated/0/audio/aac . |
sampleRate | Sample rate (Hz) of the recording file. Supported values are as follows:
|
quality | The audio recording quality:
|
|
abstract |
Starts an audio recording on the client.
The SDK allows recording audio during a call. After successfully calling this method, you can record the audio of users in the channel and get an audio recording file. Supported file formats are as follows:
With AUDIO_RECORDING_QUALITY_MEDIUM, the file size for a 10-minute recording is approximately 2 MB.
Once the user leaves the channel, the recording automatically stops.
config | Recording configuration. See AudioRecordingConfiguration . |
ERR_ALREADY_IN_RECORDING(-160): The client is already recording audio. To start a new recording, call stopAudioRecording to stop the current recording first, and then call startAudioRecording.
|
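A minimal sketch, assuming an initialized RtcEngine named engine; the file path is hypothetical, and the AudioRecordingConfiguration field names follow the 3.4+ SDK and should be treated as assumptions.

```java
import io.agora.rtc.Constants;
import io.agora.rtc.RtcEngine;
import io.agora.rtc.audio.AudioRecordingConfiguration;

// Sketch: record the call to an AAC file; stop later with engine.stopAudioRecording().
void recordCall(RtcEngine engine) {
    AudioRecordingConfiguration config = new AudioRecordingConfiguration();
    config.filePath = "/sdcard/emulated/0/audio.aac";                    // hypothetical path
    config.recordingQuality = Constants.AUDIO_RECORDING_QUALITY_MEDIUM;  // assumed constant name
    config.recordingSampleRate = 32000;                                  // Hz
    int result = engine.startAudioRecording(config);
    if (result == -160) {
        // ERR_ALREADY_IN_RECORDING: stop the current recording before starting a new one.
    }
}
```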
abstract |
Stops the audio recording on the client.
|
abstract |
Starts an audio call test.
Deprecated. Use startEchoTest [2/3] to start an audio call test instead.
This method launches an audio call test to determine whether the audio devices (for example, headset and speaker) and the network connection are working properly.
To conduct the test:
Call the stopEchoTest method to end the test. Otherwise, the app cannot run the next echo test, nor can it call the joinChannel method to start a new call.
In the LIVE_BROADCASTING profile, only hosts can call this method. If a user switches from the COMMUNICATION profile to the LIVE_BROADCASTING profile, the user must call the setClientRole method to change the user role from an audience (default) to a host before calling this method.
|
abstract |
Starts an audio call test.
In the audio call test, you record your voice. If the recording plays back within the set time interval, the audio devices and the network connection are working properly.
Call the stopEchoTest method to end the test. Otherwise, the app cannot run the next echo test, or call the joinChannel method.
In the LIVE_BROADCASTING profile, only a host can call this method.
intervalInSeconds | The time interval (s) between when you speak and when the recording plays back. |
|
abstract |
Starts an audio and video call loop test.
Before joining a channel, to test whether the user's local sending and receiving streams are normal, you can call this method to perform an audio and video call loop test, which tests whether the audio and video devices and the user's upstream and downstream networks are working properly.
After starting the test, the user needs to make a sound or face the camera. The audio or video is output after about two seconds. If the audio playback is normal, the audio device and the user's upstream and downstream networks are working properly; if the video playback is normal, the video device and the user's upstream and downstream networks are working properly.
Call stopEchoTest to end the test; otherwise, the user cannot perform the next audio and video call loop test and cannot join the channel.
In the LIVE_BROADCASTING profile, only a host can call this method.
config | The configuration of the audio and video call loop test. See EchoTestConfiguration . |
|
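For illustration, a minimal sketch of the loop test, assuming an initialized RtcEngine named engine; the EchoTestConfiguration field names follow recent 3.x SDKs and, like the channel name, should be treated as assumptions.

```java
import android.view.SurfaceView;

import io.agora.rtc.RtcEngine;
import io.agora.rtc.models.EchoTestConfiguration;

// Sketch: run an audio-and-video loop test before joining a real channel.
void runLoopTest(RtcEngine engine, SurfaceView testView, String token) {
    EchoTestConfiguration config = new EchoTestConfiguration();
    config.enableAudio = true;
    config.enableVideo = true;
    config.view = testView;          // where the looped-back video is rendered
    config.token = token;            // token generated for the test, if your project uses tokens
    config.channelId = "echo_test";  // hypothetical test channel name
    engine.startEchoTest(config);
    // Call engine.stopEchoTest() before joining a channel.
}
```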
abstract |
Stops call loop test.
After calling startEchoTest
[2/3] or startEchoTest
[3/3], call this method if you want to stop the call loop test.
ERR_REFUSED(-5): Failed to stop the echo test. The echo test may not be running.
|
abstract |
Enables the network connection quality test.
This method tests the quality of the users' network connections and is disabled by default.
Before users join a channel or before an audience switches to a host, call this method to check the uplink network quality. This method consumes additional network traffic, which may affect the communication quality. Call the disableLastmileTest
method to disable this test after receiving the onLastmileQuality
callback, and before the user joins a channel or switches the user role.
Do not use this method together with the startLastmileProbeTest method.
Do not call any other methods before receiving the onLastmileQuality callback. Otherwise, the callback may be interrupted by other methods and may not execute.
In the LIVE_BROADCASTING profile, a host should not call this method after joining a channel.
If you call this method to test the last-mile network quality, the SDK consumes the bandwidth of a video stream whose bitrate corresponds to the bitrate you set in the setVideoEncoderConfiguration method. After you join the channel, whether you have called the disableLastmileTest method or not, the SDK automatically stops consuming the bandwidth.
|
abstract |
Disables the network connection quality test.
|
abstract |
Starts the last-mile network probe test before joining a channel to get the uplink and downlink last-mile network statistics, including the bandwidth, packet loss, jitter, and round-trip time (RTT).
Once this method is enabled, the SDK returns the following callbacks:
onLastmileQuality
: the SDK triggers this callback within two seconds depending on the network conditions. This callback rates the network conditions with a score and is more closely linked to the user experience.onLastmileProbeResult
: the SDK triggers this callback within 30 seconds depending on the network conditions. This callback returns the real-time statistics of the network conditions and is more objective.Call this method to check the uplink network quality before users join a channel or before an audience switches to a host.
Do not use this method together with enableLastmileTest.
Do not call any other methods before receiving the onLastmileQuality and onLastmileProbeResult callbacks. Otherwise, the callbacks may be interrupted by other methods.
In the LIVE_BROADCASTING profile, a host should not call this method after joining a channel.
config | The configurations of the last-mile network probe test. For details, see LastmileProbeConfig . |
|
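A minimal sketch, assuming an initialized RtcEngine named engine; the LastmileProbeConfig field names and package path follow the 3.x SDK and should be treated as assumptions.

```java
import io.agora.rtc.RtcEngine;
import io.agora.rtc.internal.LastmileProbeConfig;

// Sketch: probe both directions before joining; results arrive in onLastmileQuality and onLastmileProbeResult.
void probeNetwork(RtcEngine engine) {
    LastmileProbeConfig config = new LastmileProbeConfig();
    config.probeUplink = true;
    config.probeDownlink = true;
    config.expectedUplinkBitrate = 1000000;    // expected uplink bitrate in bps
    config.expectedDownlinkBitrate = 1000000;  // expected downlink bitrate in bps
    engine.startLastmileProbeTest(config);
    // Call engine.stopLastmileProbeTest() after the callbacks have been received.
}
```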
abstract |
Stops the last-mile network probe test.
|
abstract |
Sets a custom video source.
During real-time communication, the Agora SDK enables the default video input device, that is, the built-in camera to capture video. If you need a custom video source, implement the IVideoSource
class first, and call this method to add the custom video source to the SDK.
You can call this method either before or after joining a channel.
source | The custom video source. See IVideoSource . |
|
abstract |
Customizes the local video renderer.
Call this method to add an external local video renderer to the SDK.
You can call this method either before or after joining a channel.
render | Sets the local video renderer. See IVideoSink . |
|
abstract |
Customizes the remote video renderer.
Call this method to add an external remote video renderer to the SDK.
You can call this method either before or after joining a channel. If you call it before joining a channel, you need to maintain the uid
of the remote user on your app level.
uid | The ID of the remote user. |
render | Sets the remote video renderer. See IVideoSink . |
|
abstract |
Sets the external audio sink.
This method applies to scenarios where you want to use external audio data for playback.
Ensure that you call this method before joining a channel.
onPlaybackFrame
callback after using the Pull method to set the external audio sink.enabled | Whether to enable or disable the external audio sink:
|
sampleRate | The sample rate (Hz) of the external audio sink. You can set this parameter as 16000, 32000, 44100, or 48000. |
channels | The number of audio channels of the external audio sink:
|
|
abstract |
Pulls the remote audio frame.
Before calling this method, call the setExternalAudioSink
(enabled: true) method to enable and set the external audio sink.
After a successful method call, the app pulls the decoded and mixed audio data for playback.
The app no longer retrieves audio data from the onPlaybackFrame callback after using the Pull method to set the external audio sink. After calling the pullPlaybackAudioFrame method successfully, the app will not retrieve any audio data from the onPlaybackFrame callback.
The difference between the onPlaybackFrame callback and the pullPlaybackAudioFrame method is as follows:
onPlaybackFrame: The SDK sends the audio data to the app through this callback. Any delay in processing the audio frames may result in audio jitter.
pullPlaybackAudioFrame: The app pulls the remote audio data. After setting the audio data parameters, the SDK adjusts the frame buffer and avoids problems caused by jitter in the external audio playback.
data | The audio data that you want to pull. The data format is in byte[]. |
lengthInByte | The data length (byte) of the external audio data. The value of this parameter is related to the audio duration, and the values of the sampleRate and channels parameters that you set in setExternalAudioSink . Agora recommends setting the audio duration no shorter than 10 ms. The formula for lengthInByte is: lengthInByte = sampleRate/1000 × 2 × channels × audio duration (ms). |
|
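For example, a minimal sketch assuming an initialized RtcEngine named engine and an external audio sink already enabled at 48 kHz stereo; the buffer size follows the lengthInByte formula above.

```java
import io.agora.rtc.RtcEngine;

// Sketch: pull 10 ms of mixed playback data.
// lengthInByte = sampleRate / 1000 * 2 * channels * duration(ms) = 48 * 2 * 2 * 10 = 1920 bytes.
void pullTenMilliseconds(RtcEngine engine, byte[] buffer /* 1920 bytes */) {
    engine.pullPlaybackAudioFrame(buffer, buffer.length);
    // The app is now responsible for playing back the contents of buffer.
}
```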
abstract |
Sets the external audio source.
Ensure that you call this method before the joinChannel and startPreview methods.
enabled | Whether to enable/disable the external audio source:
|
sampleRate | The sample rate (Hz) of the external audio source, which can be set as 8000, 16000, 32000, 44100, or 48000 Hz. |
channels | The number of channels of the external audio source:
|
|
abstract |
Pushes the external audio frame to the Agora SDK for encoding.
Deprecated. Use pushExternalAudioFrame [2/2] instead.
data | The external audio frame. The value range of the audio frame length (ms) is [10,60]. |
timestamp | Timestamp (ms) of the external audio frame. It is mandatory. You can use this parameter for the following purposes:
|
|
abstract |
Pushes the external audio frame to a specified position.
According to your needs, you can push the external audio frame to one of three positions: after audio capture, before audio encoding, or before local playback. You can call this method multiple times to push one audio frame to multiple positions or multiple audio frames to different positions. For example, in the KTV scenario, you can push the singing voice to after audio capture, so that the singing voice can be processed by the SDK audio module and you can obtain a high-quality audio experience; you can also push the accompaniment to before audio encoding, so that the accompaniment is not affected by the audio module of the SDK.
data | The external audio frame. |
timestamp | The timestamp (ms) of the external audio frame. You can use this parameter for the following purposes:
|
sampleRate | The sample rate of the external audio source, which can be set as 8000, 16000, 32000, 44100, or 48000 Hz. |
channels | The number of audio channels:
|
bytesPerSample | The number of bytes per audio sample, which is usually 16-bit (2-byte). |
sourcePos | The push position of the external audio frame. See AudioExternalSourcePos . |
ERR_INVALID_ARGUMENT: The parameter is invalid.
ERR_TOO_OFTEN: The call frequency is too high, causing the internal buffer to overflow. Call this method again after 30-50 ms.
|
abstract |
Sets the volume of the external audio frame in the specified position.
You can call this method multiple times to set the volume of external audio frames in different positions. The volume setting takes effect for all external audio frames that are pushed to the specified position.
sourcePos | The push position of the external audio frame. See AudioExternalSourcePos . |
volume | The volume of the external audio frame. The value range is [0,100]. The default value is 100, which represents the original value. |
ERR_INVALID_ARGUMENT: The parameter is invalid.
|
abstract |
Configures the external video source.
Ensure that you call this method before joining a channel.
enable | Whether to use the external video source:
|
useTexture | Whether to use texture as an input:
|
pushMode | Whether or not the external video source needs to call the PushExternalVideoFrame method to send the video frame to the Agora SDK:
|
|
abstract |
Pushes the video frame using the AgoraVideoFrame
class and passes the video frame to the Agora SDK.
Call the setExternalVideoSource
method and set pushMode
as true
before calling this method. Otherwise, a failure returns after calling this method. Ensure that you call this method after joining a channel.
frame | Video frame to be pushed. See AgoraVideoFrame . |
In the COMMUNICATION profile, the SDK does not support textured video frames.
true: The frame is pushed successfully.
|
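A minimal sketch, assuming an initialized RtcEngine named engine with setExternalVideoSource(true, false, true) already called before joining; the AgoraVideoFrame field and constant names follow the 3.x SDK.

```java
import io.agora.rtc.RtcEngine;
import io.agora.rtc.video.AgoraVideoFrame;

// Sketch: push an NV21 buffer captured by the app to the SDK.
void pushNv21Frame(RtcEngine engine, byte[] nv21, int width, int height) {
    AgoraVideoFrame frame = new AgoraVideoFrame();
    frame.format = AgoraVideoFrame.FORMAT_NV21;
    frame.buf = nv21;
    frame.stride = width;
    frame.height = height;
    frame.timeStamp = System.currentTimeMillis();
    engine.pushExternalVideoFrame(frame);
}
```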
abstract |
Checks whether texture encoding is supported.
true
: Texture encoding is supported.false
: Texture encoding is not supported.
|
abstract |
Registers the audio observer object.
Ensure that you call this method before joining a channel.
observer | Audio observer object to be registered. See IAudioFrameObserver . Set the value as null to cancel registering, if necessary. |
|
abstract |
Registers a local encoded video frame observer.
After you successfully register the local encoded video frame observer, the SDK triggers the callbacks that you have implemented in the IVideoEncodedFrameObserver
class each time a video frame is received.
observer | The local encoded video frame observer. See IVideoEncodedFrameObserver . If null is passed, the observer registration is canceled. |
|
abstract |
Registers a raw video frame observer.
After you successfully register the raw video frame observer, the SDK triggers the callbacks that you have implemented in the IVideoFrameObserver
class each time a video frame is received.
observer | The raw video frame observer. See IVideoFrameObserver . If null is passed, the observer registration is canceled. |
|
abstract |
Sets the audio sampling format for the onRecordFrame
callback.
Ensure that you call this method before joining a channel.
The SDK calculates the sample interval based on the sampleRate, channel, and samplesPerCall parameters you set in this method. Sample interval (s) = samplesPerCall / (sampleRate × channel). Ensure that the sample interval ≥ 0.01. The SDK triggers the onRecordFrame callback according to the sample interval.
callback according to the sample interval.sampleRate | The sample rate (samplesPerSec ) returned in the onRecordFrame callback, which can be set as 8000, 16000, 32000, 44100, or 48000 Hz. |
channel | The number of audio channels (channels ) returned in the onRecordFrame callback:
|
mode | The use mode of the onRecordFrame callback:
|
samplesPerCall | The number of samples the onRecordFrame callback returns. In RTMP or RTMPS streaming scenarios, set it as 1024. |
|
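For example, a minimal sketch requesting 48 kHz stereo recording frames roughly every 10 ms, assuming an initialized RtcEngine named engine; RAW_AUDIO_FRAME_OP_MODE_READ_ONLY is assumed to be the read-only mode constant.

```java
import io.agora.rtc.Constants;
import io.agora.rtc.RtcEngine;

// Sketch: sample interval = samplesPerCall / (sampleRate * channel) = 960 / (48000 * 2) = 0.01 s.
void configureRecordingFrames(RtcEngine engine) {
    engine.setRecordingAudioFrameParameters(
            48000,                                        // sampleRate in Hz
            2,                                            // channel: stereo
            Constants.RAW_AUDIO_FRAME_OP_MODE_READ_ONLY,  // mode: only read the data in onRecordFrame
            960                                           // samplesPerCall
    );
}
```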
abstract |
Sets the audio playback format for the onPlaybackFrame
callback.
Ensure that you call this method before joining a channel.
sampleRate | The sample rate (samplesPerSec ) returned in the onPlaybackFrame callback, which can be set as 8000, 16000, 32000, 44100, or 48000 Hz. |
channel | The number of channels (channels ) returned in the onPlaybackFrame callback:
|
mode | The use mode of the onPlaybackFrame callback:
|
samplesPerCall | The number of samples the onPlaybackFrame callback returns. In RTMP or RTMPS streaming scenarios, set it as 1024. |
The SDK calculates the sample interval based on the sampleRate, channel, and samplesPerCall parameters you set in this method. Sample interval (s) = samplesPerCall / (sampleRate × channel). Ensure that the sample interval ≥ 0.01. The SDK triggers the onPlaybackFrame callback according to the sample interval.
|
abstract |
Sets the mixed audio format for the onMixedAudioFrame callback.
Ensure that you call this method before joining a channel.
sampleRate | The sample rate (samplesPerSec ) returned in the onMixedAudioFrame callback, which can be set as 8000, 16000, 32000, 44100, or 48000 Hz. |
samplesPerCall | The number of samples the onMixedAudioFrame callback returns. In RTMP or RTMPS streaming scenarios, set it as 1024. |
The SDK calculates the sample interval based on the sampleRate and samplesPerCall parameters you set in this method, and channels in AudioFrame. Sample interval (s) = samplesPerCall / (sampleRate × channels). Ensure that the sample interval ≥ 0.01. The SDK triggers the onMixedAudioFrame callback according to the sample interval.
|
abstract |
Adds a watermark image to the local video.
Deprecated. Use the addVideoWatermark [2/2] method instead.
This method adds a PNG watermark image to the local video stream for the sampling device, channel audience, or CDN live audience to see and capture. To add the PNG file to a CDN live publishing stream, see the setLiveTranscoding
method.
watermark | Watermark image to be added to the local video stream. See Agora Image . |
In a local video stream, url in Agora Image refers to the absolute path of the added watermark image file in the local video stream.
In a CDN live stream, url in Agora Image refers to the address of the added watermark image in the CDN live streaming.
If you set orientationMode as Adaptive in the setVideoEncoderConfiguration method, the watermark image rotates with the video frame around the upper left corner of the watermark image.
|
abstract |
Adds a watermark image to the local video.
addVideoWatermark
1.This method adds a PNG watermark image to the local video stream in a live streaming. Once the watermark image is added, all the audience in the channel (CDN audience included), and the sampling device can see and capture it.
Agora supports adding only one watermark image onto the local video, and the newly watermark image replaces the previous one.
The watermark position depends on the settings in the setVideoEncoderConfiguration
method:
Ensure that you have called the enableVideo method to enable the video module before calling this method.
To add a watermark image to the CDN live publishing stream only, see also the setLiveTranscoding method.
If you have enabled the local video preview by calling the startPreview method, you can use the visibleInPreview member in the WatermarkOptions class to set whether or not the watermark is visible in preview.
watermarkUrl | The local file path of the watermark image to be added. Agora supports using a URI address, an absolute path, or a path that starts with /assets/ to access a local file. Note: You might encounter permission issues if you use an absolute path to access a local file, so Agora recommends using a URI address instead. For example: "content://com.android.providers.media.documents/document/image%3A1384". |
options | The options of the watermark image to be added. See Watermark Options . |
|
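For illustration, a minimal sketch assuming an initialized RtcEngine named engine; the imageUri value is hypothetical and the WatermarkOptions field name follows the 3.x SDK and should be treated as an assumption.

```java
import io.agora.rtc.RtcEngine;
import io.agora.rtc.video.WatermarkOptions;

// Sketch: add a PNG watermark from a content URI and keep it visible in the local preview.
void addWatermark(RtcEngine engine, String imageUri) {
    WatermarkOptions options = new WatermarkOptions();
    options.visibleInPreview = true;
    // Optionally set options.positionInPortraitMode / positionInLandscapeMode before adding.
    engine.addVideoWatermark(imageUri, options);
}
```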
abstract |
Removes the watermark image from the video stream added by addVideoWatermark
.
|
abstract |
Sets the priority of a remote user's media stream.
The SDK ensures the high-priority user gets the best possible stream quality.
Ensure that you call this method before joining a channel.
uid | The ID of the remote user. |
userPriority | The priority of the remote user:
|
|
abstract |
Sets the fallback option for the locally published video stream based on the network conditions.
If option
is set as STREAM_FALLBACK_OPTION_AUDIO_ONLY(2)
, the SDK will:
When the locally published video stream falls back to audio only or when the audio-only stream switches back to the video, the SDK triggers the onLocalPublishFallbackToAudioOnly callback.
Ensure that you call this method before joining a channel.
option | The fallback option for the locally published video stream:
|
|
abstract |
Sets the fallback option for the remotely subscribed video stream based on the network conditions.
If option
is set as STREAM_FALLBACK_OPTION_AUDIO_ONLY(2)
, the SDK automatically switches the video from a high-stream to a low-stream, or disables the video when the downlink network condition cannot support both audio and video to guarantee the quality of the audio. The SDK monitors the network quality and restores the video stream when the network conditions improve. When the remotely subscribed video stream falls back to audio only, or the audio-only stream switches back to the video, the SDK triggers the onRemoteSubscribeFallbackToAudioOnly
callback.
Ensure that you call this method before joining a channel.
option | The fallback option for the remotely subscribed video stream:
|
|
abstract |
Enables/Disables the dual video stream mode.
If dual-stream mode is enabled, the receiver can choose to receive the high stream (high-resolution high-bitrate video stream) or low stream (low-resolution low-bitrate video stream) video. You can call this method either before or after joining a channel.
enabled | Whether to enable dual-stream mode:
|
|
abstract |
Sets the stream type of the remote video.
Under limited network conditions, if the publisher has not disabled the dual-stream mode using enableDualStreamMode(false)
, the receiver can choose to receive either the high-quality video stream (the high resolution, and high bitrate video stream) or the low-quality video stream (the low resolution, and low bitrate video stream).
By default, users receive the high-quality video stream. Call this method if you want to switch the remote stream type to reduce the bandwidth and resources.
The aspect ratio of the low-quality video stream is the same as the high-quality video stream. Once the resolution of the high-quality video stream is set, the system automatically sets the resolution, frame rate, and bitrate of the low-quality video stream.
The SDK reports the result of calling this method in the onApiCallExecuted
callback.
You can call this method either before or after joining a channel. If you call both setRemoteVideoStreamType
and setRemoteDefaultVideoStreamType
, the SDK applies the settings in the setRemoteVideoStreamType
method.
uid | ID of the remote user sending the video stream. |
streamType | The video-stream type:
|
|
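A minimal sketch, assuming an initialized RtcEngine named engine and a known remote uid; the sender enables dual-stream mode and a receiver then requests the low-quality stream of that user.

```java
import io.agora.rtc.Constants;
import io.agora.rtc.RtcEngine;

// Sketch: dual-stream mode on the publishing side, low-quality stream on the receiving side.
void preferLowStream(RtcEngine engine, int remoteUid) {
    engine.enableDualStreamMode(true);                                       // on the sender
    engine.setRemoteVideoStreamType(remoteUid, Constants.VIDEO_STREAM_LOW);  // on the receiver
}
```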
abstract |
Sets the default video-stream type of the remotely subscribed video stream when the remote user sends dual streams.
If you call both this method and setRemoteVideoStreamType, the SDK applies the settings in the setRemoteVideoStreamType method.
streamType | The default video-stream type:
|
|
abstract |
Enables built-in encryption with an encryption password before joining a channel.
Deprecated. Use enableEncryption instead.
All users in a channel must set the same encryption password. The encryption password is automatically cleared once a user leaves the channel. If the encryption password is not specified or set to empty, the encryption functionality is disabled.
secret | Encryption password. |
|
abstract |
Sets the built-in encryption mode.
Deprecated. Use enableEncryption instead.
The Agora SDK supports built-in encryption, which is set to the aes-128-xts mode by default. Call this method to set the encryption mode to use other encryption modes. All users in the same channel must use the same encryption mode and password.
Refer to the information related to the AES encryption algorithm on the differences between the encryption modes.
Call the setEncryptionSecret method before calling this method.
encryptionMode | The encryption mode:
|
|
abstract |
Enables/Disables the built-in encryption.
In scenarios requiring high security, Agora recommends calling enableEncryption
to enable the built-in encryption before joining a channel.
After a user leaves the channel, the SDK automatically disables the built-in encryption. To re-enable the built-in encryption, call this method before the user joins the channel again.
As of v3.4.5, Agora recommends using either the AES_128_GCM2
or AES_256_GCM2
encryption mode, both of which support adding a salt and are more secure. For details, see Media Stream Encryption.
enabled | Whether to enable the built-in encryption.
|
config | Configurations of built-in encryption schemas. See EncryptionConfig . |
|
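A minimal sketch of enabling built-in encryption before joining, assuming an initialized RtcEngine named engine; the EncryptionConfig class location, enum values, and field names follow the 3.4.5+ SDK and should be treated as assumptions, and the key and salt are placeholders for values delivered by your own server.

```java
import io.agora.rtc.RtcEngine;
import io.agora.rtc.internal.EncryptionConfig;

// Sketch: enable AES-128-GCM2 encryption with a key and, for the GCM2 modes, a 32-byte salt.
void enableMediaEncryption(RtcEngine engine, String key, byte[] salt) {
    EncryptionConfig config = new EncryptionConfig();
    config.encryptionMode = EncryptionConfig.EncryptionMode.AES_128_GCM2;
    config.encryptionKey = key;       // key from your server
    config.encryptionKdfSalt = salt;  // salt from your server (assumed field name)
    engine.enableEncryption(true, config);
}
```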
abstract |
Publishes the local stream to a specified CDN streaming URL.
After calling this method, you can push media streams in RTMP or RTMPS protocol to the CDN. The SDK triggers the onRtmpStreamingStateChanged
callback on the local client to report the state of adding a local stream to the CDN.
This method applies to LIVE_BROADCASTING only.
url | The CDN streaming URL in the RTMP or RTMPS format. The maximum length of this parameter is 1024 bytes. The URL address must not contain special characters, such as Chinese language characters. |
transcodingEnabled | Whether to enable transcoding. If you set this parameter as true , ensure that you call the setLiveTranscoding method before this method.
|
ERR_INVALID_ARGUMENT(2): Invalid parameter, usually because the URL address is null or the string length is 0.
ERR_NOT_INITIALIZED(7): You have not initialized RtcEngine when publishing the stream.
|
abstract |
Removes an RTMP or RTMPS stream from the CDN.
This method removes the CDN streaming URL (added by addPublishStreamUrl
) from a CDN live stream. The SDK reports the result of this method call in the onRtmpStreamingStateChanged
callback.
This method applies to LIVE_BROADCASTING only.
url | The CDN streaming URL to be removed. The maximum length of this parameter is 1024 bytes. The URL address must not contain special characters, such as Chinese language characters. |
|
abstract |
Sets the video layout and audio settings for CDN live.
The SDK triggers the onTranscodingUpdated
callback when you call this method to update the LiveTranscoding
class. If you call this method to set the LiveTranscoding
class for the first time, the SDK does not trigger the onTranscodingUpdated
callback.
This method applies to LIVE_BROADCASTING only.
Ensure that you call the setClientRole method and set the user role as the host.
Ensure that you call the setLiveTranscoding method before calling the addPublishStreamUrl method.
transcoding | The CDN live audio/video transcoding settings. See LiveTranscoding . |
|
abstract |
Starts pushing media streams to a CDN without transcoding.
You can call this method to push a live audio-and-video stream to the specified CDN address. This method can push media streams to only one CDN address at a time, so if you need to push streams to multiple addresses, call this method multiple times.
After you call this method, the SDK triggers the onRtmpStreamingStateChanged
callback on the local client to report the state of the streaming.
Only hosts in the LIVE_BROADCASTING profile can call this method.
If you want to retry pushing streams after a failed push, call stopRtmpStream first, then call this method to retry pushing streams; otherwise, the SDK returns the same error code as the last failed push.
url | The address of the CDN live streaming. The format is RTMP or RTMPS. The character length cannot exceed 1024 bytes. Special characters such as Chinese characters are not supported. |
ERR_INVALID_ARGUMENT(2): url is null or the string length is 0.
ERR_NOT_INITIALIZED(7): The SDK is not initialized before calling this method.
|
abstract |
Starts pushing media streams to a CDN and sets the transcoding configuration.
You can call this method to push a live audio-and-video stream to the specified CDN address and set the transcoding configuration. This method can push media streams to only one CDN address at a time, so if you need to push streams to multiple addresses, call this method multiple times.
After you call this method, the SDK triggers the onRtmpStreamingStateChanged
callback on the local client to report the state of the streaming.
Only hosts in the LIVE_BROADCASTING profile can call this method.
If you want to retry pushing streams after a failed push, call stopRtmpStream first, then call this method to retry pushing streams; otherwise, the SDK returns the same error code as the last failed push.
url | The address of the CDN live streaming. The format is RTMP or RTMPS. The character length cannot exceed 1024 bytes. Special characters such as Chinese characters are not supported. |
transcoding | The transcoding configuration for CDN live streaming. LiveTranscoding . |
ERR_INVALID_ARGUMENT(2): url is null or the string length is 0.
ERR_NOT_INITIALIZED(7): The SDK is not initialized before calling this method.
|
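A minimal sketch, assuming an initialized RtcEngine named engine, a known host uid, and an RTMP URL from your CDN vendor; the LiveTranscoding and TranscodingUser field names follow the 3.x SDK and should be treated as assumptions.

```java
import io.agora.rtc.RtcEngine;
import io.agora.rtc.live.LiveTranscoding;

// Sketch: push a 640x360 transcoded stream with one host laid out full-frame.
void startCdnPush(RtcEngine engine, int hostUid, String rtmpUrl) {
    LiveTranscoding transcoding = new LiveTranscoding();
    transcoding.width = 640;
    transcoding.height = 360;
    transcoding.videoBitrate = 400;  // Kbps

    LiveTranscoding.TranscodingUser host = new LiveTranscoding.TranscodingUser();
    host.uid = hostUid;
    host.x = 0;
    host.y = 0;
    host.width = 640;
    host.height = 360;
    transcoding.addUser(host);

    engine.startRtmpStreamWithTranscoding(rtmpUrl, transcoding);
    // Watch onRtmpStreamingStateChanged for the streaming state; call stopRtmpStream(rtmpUrl) to stop.
}
```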
abstract |
Updates the transcoding configuration.
After you start pushing media streams to CDN with transcoding, you can dynamically update the transcoding configuration according to the scenario. The SDK triggers the onTranscodingUpdated
callback after the transcoding configuration is updated.
transcoding | The transcoding configuration for CDN live streaming. LiveTranscoding . |
|
abstract |
Stops pushing media streams to a CDN.
You can call this method to stop the live stream on the specified CDN address. This method can stop pushing media streams to only one CDN address at a time, so if you need to stop pushing streams to multiple addresses, call this method multiple times.
After you call this method, the SDK triggers the onRtmpStreamingStateChanged
callback on the local client to report the state of the streaming.
url | The address of the CDN live streaming. The format is RTMP or RTMPS. The character length cannot exceed 1024 bytes. Special characters such as Chinese characters are not supported. |
|
abstract |
Creates a data stream.
Deprecated. Use the createDataStream [2/2] method instead.
Each user can create up to five data streams during the lifecycle of the RtcEngine.
Ensure that you call this method after joining a channel.
Agora does not support setting reliable as true while setting ordered as false.
reliable | Whether or not the recipients are guaranteed to receive the data stream from the sender within five seconds:
|
ordered | Sets whether or not the recipients receive the data stream in the sent order:
|
Error Codes
. For example, if it returns -2, then it indicates ERR_INVALID_ARGUMENT(-2)
in Error Codes
.
|
abstract |
Creates a data stream.
Each user can create up to five data streams in a single channel.
This method does not support data reliability. If the receiver receives a data packet five seconds or more after it was sent, the SDK directly discards the data.
config | The configurations for the data stream: DataStreamConfig . |
|
abstract |
Sends data stream messages.
The SDK has the following restrictions on this method:
A successful sendStreamMessage method call triggers the onStreamMessage
callback on the remote client, from which the remote user gets the stream message.
A failed sendStreamMessage method call triggers the onStreamMessageError
callback on the remote client.
Ensure that you have called createDataStream to create a data stream before calling this method.
This method applies only to the COMMUNICATION profile or to hosts in the LIVE_BROADCASTING profile.
streamId | ID of the sent data stream returned by the createDataStream method. |
message | Sent data. |
This method applies only to the COMMUNICATION profile or to hosts in the LIVE_BROADCASTING profile. If an audience in the LIVE_BROADCASTING profile calls this method, the audience role may be changed to a host.
|
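For illustration, a minimal sketch of creating a stream and sending a message, assuming an initialized RtcEngine named engine that has already joined a channel; the DataStreamConfig field names follow the 3.3+ SDK and should be treated as assumptions.

```java
import java.nio.charset.StandardCharsets;

import io.agora.rtc.RtcEngine;
import io.agora.rtc.models.DataStreamConfig;

// Sketch: create an ordered data stream after joining, then send a short text message on it.
void sendHello(RtcEngine engine) {
    DataStreamConfig config = new DataStreamConfig();
    config.ordered = true;         // recipients receive packets in the sent order
    config.syncWithAudio = false;  // do not synchronize with the audio playback timeline
    int streamId = engine.createDataStream(config);
    if (streamId >= 0) {
        engine.sendStreamMessage(streamId, "hello".getBytes(StandardCharsets.UTF_8));
    }
}
```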
abstract |
Sets the preference option for the video quality (LIVE_BROADCASTING
only).
Deprecated. Use the degradationPrefer parameter in the VideoEncoderConfiguration class to set the video quality preference.
preferFrameRateOverImageQuality | Sets the video quality preference:
|
|
abstract |
Sets the local video mirror mode.
Deprecated. Use setupLocalVideo or setLocalRenderMode instead. Use this method together with setupLocalVideo .
mode | The local video mirror mode:
|
|
static |
Gets the recommended encoder type.
HARDWARE_ENCODER(1): The hardware encoder.
SOFTWARE_ENCODER(2): The software encoder.
|
abstract |
Switches between front and rear cameras.
Ensure that you call this method after the camera starts, for example, by calling startPreview
or joinChannel
.
|
abstract |
Checks whether the camera zoom function is supported.
Ensure that you call this method after the camera starts, for example, by calling startPreview
or joinChannel
.
|
abstract |
Checks whether the device supports enabling the flash.
The SDK uses the front camera by default, so if you call isCameraTorchSupported
directly, you can find out from the return value whether the device supports enabling the flash when using the front camera. If you want to check whether the device supports enabling the flash when using the rear camera, call switchCamera
to switch the camera used by the SDK to the rear camera, and then call isCameraTorchSupported
.
|
abstract |
Checks whether the camera manual focus function is supported.
Ensure that you call this method after the camera starts, for example, by calling startPreview
or joinChannel
.
|
abstract |
Checks whether the camera exposure function is supported.
Ensure that you call this method after the camera starts, for example, by calling startPreview
or joinChannel
.
|
abstract |
Checks whether the camera auto-face focus function is supported.
Ensure that you call this method after the camera starts, for example, by calling startPreview
or joinChannel
.
|
abstract |
Sets the camera zoom ratio.
Ensure that you call this method after the camera starts, for example, by calling startPreview
or joinChannel
.
factor | Sets the camera zoom factor. The value ranges between 1.0 and the maximum zoom supported by the device. |
|
abstract |
Gets the maximum zoom ratio supported by the camera.
Ensure that you call this method after the camera starts, for example, by calling startPreview
or joinChannel
.
|
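For example, the zoom methods above can be combined as follows. This is a sketch that assumes an initialized RtcEngine named mRtcEngine and that the camera has already started (for example, after startPreview).

// Query zoom support after the camera starts, then apply a clamped zoom factor.
if (mRtcEngine.isCameraZoomSupported()) {
    float maxZoom = mRtcEngine.getCameraMaxZoomFactor();
    float requestedZoom = 2.0f;   // desired zoom level
    mRtcEngine.setCameraZoomFactor(Math.min(requestedZoom, maxZoom));   // stay within the device limit
}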
abstract |
Sets the camera manual focus position.
Ensure that you call this method after the camera starts, for example, by calling startPreview
or joinChannel
.
A successful setCameraFocusPositionInPreview method call triggers the onCameraFocusAreaChanged
callback on the local client.
positionX | The horizontal coordinate of the touch point in the view. |
positionY | The vertical coordinate of the touch point in the view. |
|
abstract |
Sets the camera exposure position.
Ensure that you call this method after the camera starts, for example, by calling startPreview
or joinChannel
.
A successful setCameraExposurePosition method call triggers the onCameraExposureAreaChanged
callback on the local client.
positionXinView | The horizontal coordinate of the touch point in the view. |
positionYinView | The vertical coordinate of the touch point in the view. |
|
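A sketch of driving the two position setters above from a touch on the local preview, assuming an initialized RtcEngine named mRtcEngine and a View named localView that renders the local preview.

// Focus and expose on the point the user taps in the local preview.
localView.setOnTouchListener((view, event) -> {
    if (event.getAction() == android.view.MotionEvent.ACTION_DOWN) {
        float x = event.getX();
        float y = event.getY();
        mRtcEngine.setCameraFocusPositionInPreview(x, y);   // triggers onCameraFocusAreaChanged
        mRtcEngine.setCameraExposurePosition(x, y);          // triggers onCameraExposureAreaChanged
    }
    return true;   // consume the touch event
});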
abstract |
Enables/Disables face detection for the local user.
Once face detection is enabled, the SDK triggers the onFacePositionChanged
callback to report the face information of the local user, which includes the following aspects:
enable | Whether to enable the face detection function for the local user:
|
|
abstract |
Sets whether to enable the flash.
isOn | Determines whether to enable the flash:
|
|
abstract |
Sets whether to enable face autofocus.
The SDK disables face autofocus by default. To set face autofocus, call this method.
enabled | Determines whether to enable face autofocus:
|
|
abstract |
Gets the current call ID.
When a user joins a channel on a client, a call ID is generated to identify the call from the client. Feedback methods, such as rate and complain , must be called after the call ends to submit feedback to the SDK.
The rate and complain methods require the callId parameter retrieved from the getCallId method during the call; callId is then passed as an argument into rate and complain after the call ends.
Ensure that you call this method after joining a channel.
|
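For illustration, the feedback flow described above could look as follows, assuming an initialized RtcEngine named mRtcEngine.

// While in the channel, cache the call ID for later feedback.
String callId = mRtcEngine.getCallId();

// After the call ends, leave the channel and submit feedback.
mRtcEngine.leaveChannel();
mRtcEngine.rate(callId, 5, "Good call quality");        // rating must be between 1 and 5
mRtcEngine.complain(callId, "Occasional audio drops");  // optional quality complaint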
abstract |
Allows the user to rate a call after the call ends.
Ensure that you call this method after leaving a channel.
callId | ID of the call retrieved from the getCallId method. |
rating | Rating of the call. The value is between 1 (lowest score) and 5 (highest score). If you set a value out of this range, the ERR_INVALID_ARGUMENT(-2) error occurs. |
description | (Optional) The description of the rating. The string length must be less than 800 bytes. |
|
abstract |
Allows a user to complain about the call quality after a call ends.
Ensure that you call this method after leaving a channel.
callId | ID of the call retrieved from the getCallId method. |
description | (Optional) The description of the complaint. The string length must be less than 800 bytes. |
|
static |
Gets the SDK version.
You can call this method either before or after joining a channel.
|
static |
Gets the media engine version.
|
abstract |
Sets the log files that the SDK outputs.
Deprecated. Use the mLogConfig parameter in create [2/2] to set the log file path instead.
By default, the SDK outputs five log files, agorasdk.log, agorasdk_1.log, agorasdk_2.log, agorasdk_3.log, agorasdk_4.log, each with a default size of 1024 KB. These log files are encoded in UTF-8. The SDK writes the latest logs in agorasdk.log. When agorasdk.log is full, the SDK deletes the log file with the earliest modification time among the other four, renames agorasdk.log to the name of the deleted log file, and creates a new agorasdk.log to record the latest logs.
Ensure that you call this method immediately after calling the create method; otherwise, the output log may not be complete.
filePath | The absolute path of log files. The default file path is /storage/emulated/0/Android/data/<package_name>/files/agorasdk.log . Ensure that the directory for the log files exists and is writable. You can use this parameter to rename the log files. |
|
abstract |
Sets the output log level of the SDK.
Deprecated. Use the mLogConfig parameter in the create [2/2] method instead.
You can use one or a combination of the filters. The log level follows the sequence of OFF, CRITICAL, ERROR, WARNING, INFO, and DEBUG. Choose a level to see the logs preceding that level. For example, if you set the log level to WARNING, you see the logs within levels CRITICAL, ERROR, and WARNING.
filter | The log output level:
|
|
abstract |
Sets the log file size (KB).
Deprecated. Use the mLogConfig parameter in the create [2/2] method instead.
By default, the SDK outputs five log files, agorasdk.log, agorasdk_1.log, agorasdk_2.log, agorasdk_3.log, agorasdk_4.log, each with a default size of 1024 KB. These log files are encoded in UTF-8. The SDK writes the latest logs in agorasdk.log. When agorasdk.log is full, the SDK deletes the log file with the earliest modification time among the other four, renames agorasdk.log to the name of the deleted log file, and creates a new agorasdk.log to record the latest logs.
If you want to set the log file size, ensure that you call setLogFileSize before setLogFile
, or the logs are cleared.
fileSizeInKBytes | The size (KB) of a log file. The default value is 1024 KB. If you set fileSizeInKBytes to 1024 KB, the SDK outputs at most 5 MB of log files; if you set it to less than 1024 KB, the maximum size of a log file is still 1024 KB. |
|
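Since the three log methods above are deprecated in favor of the mLogConfig parameter of create [2/2], the replacement might look like the sketch below. It assumes RtcEngineConfig exposes an initialized mLogConfig member with filePath and fileSizeInKB fields, and that applicationContext, rtcEventHandler, and the App ID placeholder are supplied by your app; check the field names against your SDK version.

// Configure logging through create [2/2] instead of setLogFile/setLogFilter/setLogFileSize.
RtcEngineConfig config = new RtcEngineConfig();
config.mContext = applicationContext;        // your Android application context
config.mAppId = "YOUR_APP_ID";               // placeholder App ID
config.mEventHandler = rtcEventHandler;      // your IRtcEngineEventHandler instance
config.mLogConfig.filePath =
        applicationContext.getExternalFilesDir(null).getAbsolutePath() + "/agorasdk.log";
config.mLogConfig.fileSizeInKB = 2048;       // per-file size in KB (assumed field name)
try {
    RtcEngine engine = RtcEngine.create(config);
} catch (Exception e) {
    // Handle initialization failure.
}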
abstract |
Gets the native handle of the SDK engine.
This method retrieves the native C++ handle of the SDK engine, which is used in special scenarios, such as registering the audio and video frame observer.
void io.agora.rtc.RtcEngine.addHandler ( IRtcEngineEventHandler handler )
Adds the IRtcEngineEventHandler class.
The SDK uses the IRtcEngineEventHandler interface class to send callbacks to the app, and the app inherits the methods of the IRtcEngineEventHandler interface class to retrieve the callbacks.
handler | The IRtcEngineEventHandler object. |
void io.agora.rtc.RtcEngine.removeHandler ( IRtcEngineEventHandler handler )
Removes the specified IRtcEngineEventHandler object.
For callback events that you want to listen for only once, call this method to remove the corresponding IRtcEngineEventHandler object after you have received them. This method removes the specified IRtcEngineEventHandler instance.
handler | The IRtcEngineEventHandler object. |
|
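For instance, a secondary handler can be attached and later removed as sketched below, assuming an initialized RtcEngine named mRtcEngine; onUserJoined is used here as a representative callback.

// A secondary event handler that only needs to observe a single event.
IRtcEngineEventHandler onceHandler = new IRtcEngineEventHandler() {
    @Override
    public void onUserJoined(int uid, int elapsed) {
        // React to the first remote user joining, then stop listening.
        mRtcEngine.removeHandler(this);
    }
};

mRtcEngine.addHandler(onceHandler);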
abstract |
Enables the Wi-Fi mode.
enable | Whether to enable/disable the Wi-Fi mode:
|
|
static |
Gets the warning or error description.
error | The warning or error code in Warning Code or Error Code . |
|
abstract |
Monitors external headset device events.
monitor | Whether to enable/disable monitoring external headset device events:
|
|
abstract |
Monitors Bluetooth headset device events.
monitor | Whether to enable/disable monitoring Bluetooth headset device events:
|
|
abstract |
Sets the default audio route to the headset.
enabled | Sets whether or not the default audio route is to the headset:
|
|
abstract |
Provides technical preview functionalities or special customizations by configuring the SDK with JSON options.
The JSON options are not public by default. Agora is working on making commonly used JSON options public in a standard way.
parameters | Sets the parameter as a JSON string in the specified format. |
|
abstract |
Gets the Agora SDK’s parameters for customization purposes. This method is not disclosed yet. Contact support@agora.io for more information.
|
abstract |
Registers the metadata observer.
You need to implement the IMetadataObserver class and specify the metadata type in this method. A successful call of this method triggers the getMaxMetadataSize
callback.
This method enables you to add synchronized metadata in the video stream for more diversified live interactions, such as sending shopping links, digital coupons, and online quizzes.
Call this method before the joinChannel method.
observer | The IMetadataObserver class. |
type | The metadata type. Currently, the SDK supports VIDEO_METADATA(0) only.
|
|
abstract |
Starts to relay media streams across channels.
After a successful method call, the SDK triggers the onChannelMediaRelayStateChanged
and onChannelMediaRelayEvent
callbacks, and these callbacks return the state and events of the media stream relay.
If the onChannelMediaRelayStateChanged callback returns RELAY_STATE_RUNNING(2) and RELAY_OK(0), and the onChannelMediaRelayEvent callback returns RELAY_EVENT_PACKET_SENT_TO_DEST_CHANNEL(4), the SDK starts relaying media streams between the original and the destination channel.
If the onChannelMediaRelayStateChanged callback returns RELAY_STATE_FAILURE(3), an exception occurs during the media stream relay.
Call this method after the joinChannel method. This method takes effect only when you are a host in a LIVE_BROADCASTING channel. To call this method again after a successful relay, first call the stopChannelMediaRelay method to quit the current relay.
channelMediaRelayConfiguration | The configuration of the media stream relay: ChannelMediaRelayConfiguration . |
|
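To make the relay configuration concrete, here is a sketch. It assumes an initialized RtcEngine named mRtcEngine, hypothetical token strings srcToken and destToken, a ChannelMediaInfo constructor taking (channelName, token, uid), and setSrcChannelInfo/setDestChannelInfo methods on ChannelMediaRelayConfiguration; confirm the signatures for your SDK version.

// Relay the host's stream from the current channel to one destination channel.
ChannelMediaRelayConfiguration relayConfig = new ChannelMediaRelayConfiguration();

// Source: the channel the host has already joined; uid 0 lets the server assign one.
relayConfig.setSrcChannelInfo(new ChannelMediaInfo("sourceChannel", srcToken, 0));

// Destination: the channel to relay to, with a uid that is not in use in that channel.
relayConfig.setDestChannelInfo("destChannel",
        new ChannelMediaInfo("destChannel", destToken, 12345));

mRtcEngine.startChannelMediaRelay(relayConfig);

// Add or remove destinations later with updateChannelMediaRelay(relayConfig),
// and stop the relay with stopChannelMediaRelay().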
abstract |
Stops the media stream relay.
Once the relay stops, the host quits all the destination channels. After a successful method call, the SDK triggers the onChannelMediaRelayStateChanged
callback. If the callback returns RELAY_STATE_IDLE(0) and RELAY_OK(0), the host successfully stops the relay.
If the method call fails, the SDK triggers the onChannelMediaRelayStateChanged callback with the RELAY_ERROR_SERVER_NO_RESPONSE(2) or RELAY_ERROR_SERVER_CONNECTION_LOST(8) error code. You can leave the channel by calling the leaveChannel method, and the media stream relay automatically stops.
|
abstract |
Updates the channels for media relay.
After the channel media relay starts, if you want to relay the media stream to more channels, or leave the current relay channel, you can call the updateChannelMediaRelay
method.
After a successful method call, the SDK triggers the onChannelMediaRelayEvent
callback with the RELAY_EVENT_PACKET_UPDATE_DEST_CHANNEL(7) state code.
Call this method after the startChannelMediaRelay method and after receiving the onChannelMediaRelayStateChanged(RELAY_STATE_RUNNING, RELAY_OK) callback; otherwise, this method call fails. To leave a destination channel, call the removeDestChannelInfo method in ChannelMediaRelayConfiguration before calling this method.
channelMediaRelayConfiguration | The media stream relay configuration: ChannelMediaRelayConfiguration . |
|
abstract |
Pauses the media stream relay to all destination channels.
After the cross-channel media stream relay starts, you can call this method to pause relaying media streams to all destination channels; after the pause, if you want to resume the relay, call resumeAllChannelMediaRelay
.
After a successful method call, the SDK triggers the onChannelMediaRelayEvent
callback to report whether the media stream relay is successfully paused.
Call this method after the startChannelMediaRelay method.
|
abstract |
Resumes the media stream relay to all destination channels.
After calling the pauseAllChannelMediaRelay
method, you can call this method to resume relaying media streams to all destination channels.
After a successful method call, the SDK triggers the onChannelMediaRelayEvent
callback to report whether the media stream relay is successfully resumed.
Call this method after the pauseAllChannelMediaRelay method.
|
abstract |
Creates and gets an RtcChannel instance.
To join more than one channel, call this method multiple times to create as many RtcChannel instances as needed, and call the joinChannel
method of each created RtcChannel object.
After joining multiple channels, you can simultaneously subscribe to the streams of all the channels, but you can publish a stream in only one channel at a time.
channelId | The unique channel name for the AgoraRTC session in the string format. The string length must be less than 64 bytes. Supported character scopes are:
|
Do not set channelId as the empty string "".
|
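A sketch of joining a second channel through the returned RtcChannel, assuming an initialized RtcEngine named mRtcEngine, a hypothetical token string, and that RtcChannel exposes setRtcChannelEventHandler and a joinChannel overload taking ChannelMediaOptions; verify these against your SDK version.

// Create an RtcChannel for a second channel and join it as a subscriber.
RtcChannel secondChannel = mRtcEngine.createRtcChannel("secondChannel");
secondChannel.setRtcChannelEventHandler(new IRtcChannelEventHandler() {
    @Override
    public void onJoinChannelSuccess(RtcChannel rtcChannel, int uid, int elapsed) {
        // Joined the second channel.
    }
});

ChannelMediaOptions options = new ChannelMediaOptions();
options.autoSubscribeAudio = true;   // receive audio from this channel
options.autoSubscribeVideo = true;   // receive video from this channel
secondChannel.joinChannel(token, "", 0, options);
// Remember: you can publish a stream in only one channel at a time.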
abstract |
Takes a snapshot of a video stream.
This method takes a snapshot of a video stream from the specified user, generates a JPG image, and saves it to the specified path.
The method is asynchronous, and the SDK has not taken the snapshot when the method call returns. After a successful method call, the SDK triggers the onSnapshotTaken
callback to report whether the snapshot is successfully taken as well as the details of the snapshot taken.
channel | The channel name. |
uid | The user ID of the user. Set uid as 0 if you want to take a snapshot of the local user's video. |
filePath | The local path (including the filename extensions) for the snapshot. For example, /storage/emulated/0/Android/data/<package name>/files/example.jpg . Ensure that the path you specify exists and is writable. |
|
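For example, a snapshot of the local user's video can be requested as follows; a sketch assuming an initialized RtcEngine named mRtcEngine, an Android Context named context, and that the local user has joined the channel "myChannel".

// Request a snapshot of the local video; the result is reported in onSnapshotTaken.
String path = context.getExternalFilesDir(null).getAbsolutePath() + "/snapshot.jpg";
mRtcEngine.takeSnapshot("myChannel", 0, path);   // uid 0 takes a snapshot of the local user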
static |
Sets the storage directory of .so
files.
By default, the SDK loads .so
files from the app's nativeLibraryPath
. You can call this method to specify the directory where you store .so
files. After a successful method call, the SDK automatically loads .so
files based on your specified directory when initializing the RtcEngine
instance.
Normally, you need to package required .so
files when compiling the app, but this can increase the app package size. To reduce the app package size, you can call this method to enable the app to load required .so
files dynamically when the app runs. For detailed instructions, see Reduce App Size.
path | The directory where you store .so files, which must be a private directory of the app and can be obtained using Context.getDir() . Ensure the specified directory exists; otherwise, the SDK reports the InvalidParameterException error. |
|
abstract |
Starts screen sharing.
During screen sharing, make sure the user has granted screen capture permission to the application and the Android API level is not earlier than 21; otherwise, the SDK reports error codes ERR_SCREEN_CAPTURE_PERMISSION_DENIED(16)
and ERR_SCREEN_CAPTURE_SYSTEM_NOT_SUPPORTED(2)
. To capture system audio during screen sharing, ensure that the Android API level is not earlier than 29 as well; otherwise, the SDK reports the error code ERR_SCREEN_CAPTURE_SYSTEM_AUDIO_NOT_SUPPORTED(3)
.
Add the foreground service permission ( android.permission.FOREGROUND_SERVICE ) to the /app/Manifests/AndroidManifest.xml file.
Agora bills you according to the video dimensions that you set in ScreenCaptureParameters . When you do not pass in these values, Agora bills you at 1280 × 720; when you pass in these values, Agora bills you at those values. For details, see Pricing for Real-time Communication.
To improve the audio experience when sharing system audio, set the audio scenario to GAME_STREAMING by using the setAudioProfile method before joining the channel. For example, call setAudioProfile(Constants.AUDIO_PROFILE_DEFAULT, Constants.AUDIO_SCENARIO_GAME_STREAMING) .
screenCaptureParameters | The configuration of the screen sharing. See ScreenCaptureParameters . |
|
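A sketch of starting screen sharing with explicit capture settings, assuming the permission and API-level requirements above are met, an initialized RtcEngine named mRtcEngine, and that ScreenCaptureParameters exposes captureVideo, captureAudio, and a nested VideoCaptureParameters class with width, height, and framerate fields; the field names may differ in your SDK version.

// Set the recommended audio scenario before joining the channel (see the note above).
mRtcEngine.setAudioProfile(Constants.AUDIO_PROFILE_DEFAULT,
        Constants.AUDIO_SCENARIO_GAME_STREAMING);

// Configure and start screen sharing.
ScreenCaptureParameters parameters = new ScreenCaptureParameters();
parameters.captureVideo = true;   // share the screen
parameters.captureAudio = true;   // share system audio (Android 10 and later only)

ScreenCaptureParameters.VideoCaptureParameters videoParams =
        new ScreenCaptureParameters.VideoCaptureParameters();
videoParams.width = 1280;         // Agora bills at the dimensions you set here
videoParams.height = 720;
videoParams.framerate = 15;
parameters.videoCaptureParameters = videoParams;

mRtcEngine.startScreenCapture(parameters);

// Stop sharing when finished.
mRtcEngine.stopScreenCapture();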
abstract |
Stops screen sharing.
After calling this method to stop screen sharing, call setVideoSource(new AgoraDefaultSource())
if you want to switch to the camera to capture user video or call startScreenCapture
if you want to restart screen sharing.
|
abstract |
Updates the screen sharing configuration.
Call this method after startScreenCapture .
captureVideo | Determines whether to capture the screen during screen sharing:
|
Note: Due to system limitations, screen capture is only available for Android API level 21 and later (that is, Android 5 and later).
captureAudio | Determines whether to capture system audio during screen sharing:
|
Note: Due to system limitations, capturing system audio is only available for Android API level 29 and later (that is, Android 10 and later).
videoCaptureParameters | The video configuration for the shared screen stream. See VideoCaptureParameters . |
Note: This parameter is only available for scenarios where captureVideo is true .