Diffstat (limited to 'plugins/rtp/src')
-rw-r--r--  plugins/rtp/src/device.vala                 | 33
-rw-r--r--  plugins/rtp/src/plugin.vala                 |  6
-rw-r--r--  plugins/rtp/src/voice_processor_native.cpp  |  6
3 files changed, 35 insertions(+), 10 deletions(-)
diff --git a/plugins/rtp/src/device.vala b/plugins/rtp/src/device.vala
index 3c650ad6..e25271b1 100644
--- a/plugins/rtp/src/device.vala
+++ b/plugins/rtp/src/device.vala
@@ -87,6 +87,7 @@ public class Dino.Plugins.Rtp.Device : MediaDevice, Object {
return Gst.Caps.from_string("audio/x-raw,rate=48000,channels=1");
} else if (media == "video" && device.caps.get_size() > 0) {
int best_index = 0;
+ Value? best_fraction = null;
int best_fps = 0;
int best_width = 0;
int best_height = 0;
@@ -94,7 +95,28 @@ public class Dino.Plugins.Rtp.Device : MediaDevice, Object {
unowned Gst.Structure? that = device.caps.get_structure(i);
if (!that.has_name("video/x-raw")) continue;
int num = 0, den = 0, width = 0, height = 0;
- if (!that.has_field("framerate") || !that.get_fraction("framerate", out num, out den)) continue;
+ if (!that.has_field("framerate")) continue;
+ Value framerate = that.get_value("framerate");
+ if (framerate.type() == typeof(Gst.Fraction)) {
+ num = Gst.Value.get_fraction_numerator(framerate);
+ den = Gst.Value.get_fraction_denominator(framerate);
+ } else if (framerate.type() == typeof(Gst.ValueList)) {
+ for(uint j = 0; j < Gst.ValueList.get_size(framerate); j++) {
+ Value fraction = Gst.ValueList.get_value(framerate, j);
+ int in_num = Gst.Value.get_fraction_numerator(fraction);
+ int in_den = Gst.Value.get_fraction_denominator(fraction);
+ int fps = den > 0 ? (num/den) : 0;
+ int in_fps = in_den > 0 ? (in_num/in_den) : 0;
+ if (in_fps > fps) {
+ best_fraction = fraction;
+ num = in_num;
+ den = in_den;
+ }
+ }
+ } else {
+ debug("Unknown type for framerate: %s", framerate.type_name());
+ }
+ if (den == 0) continue;
if (!that.has_field("width") || !that.get_int("width", out width)) continue;
if (!that.has_field("height") || !that.get_int("height", out height)) continue;
int fps = num/den;
@@ -105,7 +127,14 @@ public class Dino.Plugins.Rtp.Device : MediaDevice, Object {
best_index = i;
}
}
- return caps_copy_nth(device.caps, best_index);
+ Gst.Caps res = caps_copy_nth(device.caps, best_index);
+ unowned Gst.Structure? that = res.get_structure(0);
+ Value framerate = that.get_value("framerate");
+ if (framerate.type() == typeof(Gst.ValueList)) {
+ that.set_value("framerate", best_fraction);
+ }
+ debug("Selected caps %s", res.to_string());
+ return res;
} else if (device.caps.get_size() > 0) {
return caps_copy_nth(device.caps, 0);
} else {
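
The device.vala hunks above handle devices whose caps expose "framerate" as a Gst.ValueList of fractions rather than a single Gst.Fraction: the list is scanned for the entry with the highest whole-number fps, and that fraction is later written back into the copied caps structure (best_fraction) before the caps are returned. A minimal standalone sketch of the same selection step, reusing the Gst calls the patch itself uses (the caps string and the main() wrapper are illustrative, not taken from Dino):

    // build: valac --pkg gstreamer-1.0 pick_framerate.vala
    int main(string[] args) {
        Gst.init(ref args);
        // Illustrative caps: "framerate" given as a list, as some cameras report it.
        Gst.Caps caps = Gst.Caps.from_string(
            "video/x-raw,width=1280,height=720,framerate={ 15/1, 30/1, 5/1 }");
        unowned Gst.Structure? s = caps.get_structure(0);
        Value framerate = s.get_value("framerate");
        int num = 0, den = 0;
        if (framerate.type() == typeof(Gst.Fraction)) {
            // Single fraction: take it directly.
            num = Gst.Value.get_fraction_numerator(framerate);
            den = Gst.Value.get_fraction_denominator(framerate);
        } else if (framerate.type() == typeof(Gst.ValueList)) {
            // List of fractions: keep the one with the highest integer fps.
            for (uint j = 0; j < Gst.ValueList.get_size(framerate); j++) {
                Value fraction = Gst.ValueList.get_value(framerate, j);
                int in_num = Gst.Value.get_fraction_numerator(fraction);
                int in_den = Gst.Value.get_fraction_denominator(fraction);
                int fps = den > 0 ? (num / den) : 0;
                int in_fps = in_den > 0 ? (in_num / in_den) : 0;
                if (in_fps > fps) {
                    num = in_num;
                    den = in_den;
                }
            }
        }
        print("picked framerate: %d/%d\n", num, den);
        return 0;
    }
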
diff --git a/plugins/rtp/src/plugin.vala b/plugins/rtp/src/plugin.vala
index f575a7d0..19a266b1 100644
--- a/plugins/rtp/src/plugin.vala
+++ b/plugins/rtp/src/plugin.vala
@@ -136,11 +136,7 @@ public class Dino.Plugins.Rtp.Plugin : RootInterface, VideoCallPlugin, Object {
pipe.set_state(Gst.State.PLAYING);
break;
case Gst.MessageType.STATE_CHANGED:
- Gst.State new_state;
- message.parse_state_changed(null, out new_state, null);
- if (message.src is Gst.Element) {
- debug("%s changed state to %s", ((Gst.Element)message.src).name, new_state.to_string());
- }
+ // Ignore
break;
case Gst.MessageType.STREAM_STATUS:
Gst.StreamStatusType status;
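
The plugin.vala hunk drops the per-element STATE_CHANGED debug output from the pipeline's bus handler; STATE_CHANGED is posted by every element on every state transition, so this logging was very verbose. As a hedged sketch of that pattern (the function name and messages are made up, this is not the actual handler), a bus watch that ignores STATE_CHANGED but still surfaces errors could look like:

    // Hypothetical bus callback; install it on the pipeline's bus with add_watch.
    bool on_bus_message(Gst.Bus bus, Gst.Message message) {
        switch (message.type) {
            case Gst.MessageType.STATE_CHANGED:
                // Ignore: one message per element per transition is too noisy.
                break;
            case Gst.MessageType.ERROR:
                Error err;
                string debug_info;
                message.parse_error(out err, out debug_info);
                warning("Error from %s: %s",
                        message.src != null ? message.src.name : "(unknown)",
                        err.message);
                break;
            default:
                break;
        }
        return true; // keep the watch installed
    }
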
diff --git a/plugins/rtp/src/voice_processor_native.cpp b/plugins/rtp/src/voice_processor_native.cpp
index 9b3292b8..00f719e1 100644
--- a/plugins/rtp/src/voice_processor_native.cpp
+++ b/plugins/rtp/src/voice_processor_native.cpp
@@ -75,7 +75,7 @@ dino_plugins_rtp_voice_processor_analyze_reverse_stream(void *native_ptr, GstAud
memcpy(frame.data_, audio_buffer.planes[0], frame.samples_per_channel_ * info->bpf);
int err = apm->AnalyzeReverseStream(&frame);
- if (err < 0) g_warning("ProcessReverseStream %i", err);
+ if (err < 0) g_warning("voice_processor_native.cpp: ProcessReverseStream %i", err);
gst_audio_buffer_unmap(&audio_buffer);
}
@@ -108,7 +108,7 @@ extern "C" void dino_plugins_rtp_voice_processor_adjust_stream_delay(void *nativ
g_debug("voice_processor_native.cpp: Stream delay metrics: %i %i %f", median, std, fraction_poor_delays);
if (fraction_poor_delays > 0.5) {
native->stream_delay = std::max(0, native->stream_delay + std::min(-10, std::max(median, 10)));
- g_debug("Adjusted stream delay %i", native->stream_delay);
+ g_debug("voice_processor_native.cpp: Adjusted stream delay %i", native->stream_delay);
}
}
@@ -130,7 +130,7 @@ dino_plugins_rtp_voice_processor_process_stream(void *native_ptr, GstAudioInfo *
apm->set_stream_delay_ms(native->stream_delay);
int err = apm->ProcessStream(&frame);
if (err >= 0) memcpy(audio_buffer.planes[0], frame.data_, frame.samples_per_channel_ * info->bpf);
- if (err < 0) g_warning("ProcessStream %i", err);
+ if (err < 0) g_warning("voice_processor_native.cpp: ProcessStream %i", err);
gst_audio_buffer_unmap(&audio_buffer);
}